From 59506197c7d0a94088dc15d488d8c964f909a6b1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:23 +0200 Subject: [PATCH 001/476] drm: don't export drm_sg_alloc It's not used internally by any driver, only by some generic ioctls. So don't export it. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_scatter.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index 9034c4c6100d..d15e09b0ae0b 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) drm_sg_cleanup(entry); return -ENOMEM; } -EXPORT_SYMBOL(drm_sg_alloc); - int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) From 1da3f87ebb7edb3e0b829ec4bbe5fb3d9d93986f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:24 +0200 Subject: [PATCH 002/476] drm: kill kernel_context_switch callbacks Not used by any in-kernel driver. So drop it. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_lock.c | 18 ------------------ include/drm/drmP.h | 3 --- 2 files changed, 21 deletions(-) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 9bf93bc9a32c..609f2d504f72 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -136,12 +136,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) } } - if (dev->driver->kernel_context_switch && - dev->last_context != lock->context) { - dev->driver->kernel_context_switch(dev, dev->last_context, - lock->context); - } - return 0; } @@ -159,7 +153,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -169,17 +162,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); - /* kernel_context_switch isn't used by any of the x86 drm - * modules but is required by the Sparc driver. - */ - if (dev->driver->kernel_context_switch_unlock) - dev->driver->kernel_context_switch_unlock(dev); - else { - if (drm_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. */ - } - } - unblock_all_signals(); return 0; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7809d230adee..15ea8c44f28d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -703,9 +703,6 @@ struct drm_driver { int (*dma_quiescent) (struct drm_device *); int (*context_ctor) (struct drm_device *dev, int context); int (*context_dtor) (struct drm_device *dev, int context); - int (*kernel_context_switch) (struct drm_device *dev, int old, - int new); - void (*kernel_context_switch_unlock) (struct drm_device *dev); /** * get_vblank_counter - get raw hardware vblank counter From be72ae26b11478c00c64858c86b647b438791671 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:26 +0200 Subject: [PATCH 003/476] drm: kill procfs callbacks Not used by any driver (rightly so!). Kill them. 
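For illustration: hooks like these follow the DRM core's usual optional-callback pattern, where the core only calls into the driver behind a NULL check, roughly like the sketch below (the member and helper names here are made up, they are not real drmP.h entries):

	/* sketch only; "example_hook" stands in for proc_init and friends */
	static int drm_call_optional_hook(struct drm_device *dev,
					  struct drm_minor *minor)
	{
		if (dev->driver->example_hook)	/* hypothetical member */
			return dev->driver->example_hook(minor);
		return 0;
	}

Once no in-tree driver fills in such a member, both the guard in the core and the member in struct drm_driver are dead code, which is exactly what the hunks below delete.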
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_proc.c | 13 ------------- include/drm/drmP.h | 2 -- 2 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index a9ba6b69ad35..e571de536dc5 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -151,7 +151,6 @@ fail: int drm_proc_init(struct drm_minor *minor, int minor_id, struct proc_dir_entry *root) { - struct drm_device *dev = minor->dev; char name[64]; int ret; @@ -172,14 +171,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, return ret; } - if (dev->driver->proc_init) { - ret = dev->driver->proc_init(minor); - if (ret) { - DRM_ERROR("DRM: Driver failed to initialize " - "/proc/dri.\n"); - return ret; - } - } return 0; } @@ -216,15 +207,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count, */ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) { - struct drm_device *dev = minor->dev; char name[64]; if (!root || !minor->proc_root) return 0; - if (dev->driver->proc_cleanup) - dev->driver->proc_cleanup(minor); - drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); sprintf(name, "%d", minor->index); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 15ea8c44f28d..0d7af3f39652 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -792,8 +792,6 @@ struct drm_driver { void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, bool from_release); - int (*proc_init)(struct drm_minor *minor); - void (*proc_cleanup)(struct drm_minor *minor); int (*debugfs_init)(struct drm_minor *minor); void (*debugfs_cleanup)(struct drm_minor *minor); From 23ddc0243d7313942b94f1a2e44e6394f7bb996e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:25 +0200 Subject: [PATCH 004/476] drm: kill dma_ready callbacks Not used by any driver. So drop it. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_lock.c | 3 --- include/drm/drmP.h | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 609f2d504f72..d9146f240d33 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -124,9 +124,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); } - if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) - dev->driver->dma_ready(dev); - if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0d7af3f39652..d5a2b8869246 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -699,7 +699,6 @@ struct drm_driver { int (*suspend) (struct drm_device *, pm_message_t state); int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); int (*context_ctor) (struct drm_device *dev, int context); int (*context_dtor) (struct drm_device *dev, int context); From fd2e7931cdefa8e9acf63f0a4efd61ae0f89e77b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:33 +0200 Subject: [PATCH 005/476] drm: kill gem_free_object_unlocked driver callback Not used by any current driver. 
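For context, the function patched below is the kref release callback that runs when the last reference to a GEM object is dropped without dev->struct_mutex held; a caller typically looks like this sketch (the helper name is made up):

	/* illustrative only, not a real driver function */
	static void example_put_bo(struct drm_gem_object *obj)
	{
		/* final unreference outside struct_mutex; the kref release
		 * (drm_gem_object_free_unlocked, patched below) then takes
		 * struct_mutex around driver->gem_free_object() */
		drm_gem_object_unreference_unlocked(obj);
	}

With the _unlocked driver hook gone, that locked path is the only remaining free path.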
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 4 +--- include/drm/drmP.h | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf92d07510df..cff7317d3830 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -474,9 +474,7 @@ drm_gem_object_free_unlocked(struct kref *kref) struct drm_gem_object *obj = (struct drm_gem_object *) kref; struct drm_device *dev = obj->dev; - if (dev->driver->gem_free_object_unlocked != NULL) - dev->driver->gem_free_object_unlocked(obj); - else if (dev->driver->gem_free_object != NULL) { + if (dev->driver->gem_free_object != NULL) { mutex_lock(&dev->struct_mutex); dev->driver->gem_free_object(obj); mutex_unlock(&dev->struct_mutex); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d5a2b8869246..6dbdbf45cd1a 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -802,7 +802,6 @@ struct drm_driver { */ int (*gem_init_object) (struct drm_gem_object *obj); void (*gem_free_object) (struct drm_gem_object *obj); - void (*gem_free_object_unlocked) (struct drm_gem_object *obj); /* vga arb irq handler */ void (*vgaarb_irq)(struct drm_device *dev, bool state); From b3da8f7d2d1fa81fb65cb3f5d9e50dde40a83182 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:29 +0200 Subject: [PATCH 006/476] drm: kill context_ctor callback It's not used by any driver. The destructor callback is unfortunately used by the via driver in a rather convoluted piece of code used to reimplement something resembling broken futexes. I didn't dare to touch this code. But at least kill the needless NULL assignment in the sis driver. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_context.c | 8 -------- drivers/gpu/drm/sis/sis_drv.c | 1 - include/drm/drmP.h | 1 - 3 files changed, 10 deletions(-) diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 2607753a320b..6d440fb894cf 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data, return -ENOMEM; } - if (ctx->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_ctor) - if (!dev->driver->context_ctor(dev, ctx->handle)) { - DRM_DEBUG("Running out of ctxs or memory.\n"); - return -ENOMEM; - } - } - ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); if (!ctx_entry) { DRM_DEBUG("out of memory\n"); diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 776bf9e9ea1a..2d1292131500 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -67,7 +67,6 @@ static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, .load = sis_driver_load, .unload = sis_driver_unload, - .context_dtor = NULL, .dma_quiescent = sis_idle, .reclaim_buffers = NULL, .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 6dbdbf45cd1a..eb4f7edcc314 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -700,7 +700,6 @@ struct drm_driver { int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); int (*dma_quiescent) (struct drm_device *); - int (*context_ctor) (struct drm_device *dev, int context); int (*context_dtor) (struct drm_device *dev, int context); From a2a273c94357ffd24e635cf9ec9b2e5c6f02b63b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 
Aug 2010 22:53:30 +0200 Subject: [PATCH 007/476] drm: don't export drm_get_drawable_info Not used by any in-tree user. So drop it. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drawable.c | 3 +-- include/drm/drmP.h | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c index c53c9768cc11..170e53178d8b 100644 --- a/drivers/gpu/drm/drm_drawable.c +++ b/drivers/gpu/drm/drm_drawable.c @@ -173,11 +173,10 @@ error: /** * Caller must hold the drawable spinlock! */ -struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) +static struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } -EXPORT_SYMBOL(drm_get_drawable_info); static int drm_drawable_free(int idr, void *p, void *data) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index eb4f7edcc314..7a5c91c2aa60 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1238,8 +1238,6 @@ extern int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, - drm_drawable_t id); extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ From 690bb51b54a986e48c7b8b2dba51a3cd262a7266 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:35 +0200 Subject: [PATCH 008/476] drm: drop return value of drm_free_agp No caller (rightly) cares about it, so drop it. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_memory.c | 4 ++-- include/drm/drmP.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 7732268eced2..70ca27edc3c9 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -106,9 +106,9 @@ DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type) } /** Wrapper around agp_free_memory() */ -int drm_free_agp(DRM_AGP_MEM * handle, int pages) +void drm_free_agp(DRM_AGP_MEM * handle, int pages) { - return drm_agp_free_memory(handle) ? 0 : -EINVAL; + drm_agp_free_memory(handle); } EXPORT_SYMBOL(drm_free_agp); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7a5c91c2aa60..7fd1870b6a70 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1178,8 +1178,8 @@ extern int drm_mem_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); +extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); -extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, struct page **pages, From 793a97e4cc38f834e0488ccc1ecbfe52ff6f5b84 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:27 +0200 Subject: [PATCH 009/476] drm: kill drm_map_ofs callbacks All drivers happily copy&pasted the default implementation without checking whether this callback is used at all. It's not. Sigh. Kill it. 
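The copy&pasted default in question is a one-line accessor, quoted here so the 14-file diff below can be read at a glance:

	/* the removed default implementation (first hunk below) */
	resource_size_t drm_core_get_map_ofs(struct drm_local_map *map)
	{
		return map->offset;
	}

Since nothing in the core ever reads dev->driver->get_map_ofs, every .get_map_ofs = drm_core_get_map_ofs initializer was dead weight.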
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_vm.c | 7 ------- drivers/gpu/drm/i810/i810_drv.c | 1 - drivers/gpu/drm/i830/i830_drv.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 1 - drivers/gpu/drm/mga/mga_drv.c | 1 - drivers/gpu/drm/nouveau/nouveau_drv.c | 1 - drivers/gpu/drm/r128/r128_drv.c | 1 - drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/savage/savage_drv.c | 1 - drivers/gpu/drm/sis/sis_drv.c | 1 - drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - drivers/gpu/drm/via/via_drv.c | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - include/drm/drmP.h | 2 -- 14 files changed, 22 deletions(-) diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index fda67468e603..2fea2e63a313 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -515,13 +515,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -resource_size_t drm_core_get_map_ofs(struct drm_local_map * map) -{ - return map->offset; -} - -EXPORT_SYMBOL(drm_core_get_map_ofs); - resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) { #ifdef __alpha__ diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index b4250b2cac1f..084a85c3d5d5 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -52,7 +52,6 @@ static struct drm_driver driver = { .device_is_agp = i810_driver_device_is_agp, .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, .dma_quiescent = i810_driver_dma_quiescent, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = i810_ioctls, .fops = { diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index a5c66aa82f0c..16352954311c 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c @@ -57,7 +57,6 @@ static struct drm_driver driver = { .device_is_agp = i830_driver_device_is_agp, .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, .dma_quiescent = i830_driver_dma_quiescent, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, #if USE_IRQS .irq_preinstall = i830_driver_irq_preinstall, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 00befce8fbb7..d079f7b86cca 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -524,7 +524,6 @@ static struct drm_driver driver = { .irq_uninstall = i915_driver_irq_uninstall, .irq_handler = i915_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 26d0d8ced80d..e9c0cbc21fe2 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -60,7 +60,6 @@ static struct drm_driver driver = { .irq_uninstall = mga_driver_irq_uninstall, .irq_handler = mga_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 1de5eb53e016..0d64259b21cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -379,7 +379,6 @@ static struct drm_driver driver = { .irq_uninstall = nouveau_irq_uninstall, .irq_handler = nouveau_irq_handler, .reclaim_buffers = 
drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = nouveau_ioctls, .fops = { diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 1e2971f13aa1..42ec20a10eed 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -56,7 +56,6 @@ static struct drm_driver driver = { .irq_uninstall = r128_driver_irq_uninstall, .irq_handler = r128_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = r128_ioctls, .dma_ioctl = r128_cce_buffers, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 795403b0e2cd..8fd89bb8e610 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -203,7 +203,6 @@ static struct drm_driver driver_old = { .irq_uninstall = radeon_driver_irq_uninstall, .irq_handler = radeon_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls, .dma_ioctl = radeon_cp_buffers, @@ -290,7 +289,6 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls_kms, .gem_init_object = radeon_gem_object_init, diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 021de44c15ab..f539996ba230 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -42,7 +42,6 @@ static struct drm_driver driver = { .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, .reclaim_buffers = savage_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 2d1292131500..72b0a74f265f 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -71,7 +71,6 @@ static struct drm_driver driver = { .reclaim_buffers = NULL, .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, .lastclose = sis_lastclose, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = sis_ioctls, .fops = { diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index ec5a43e65722..38a562647d6f 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -42,7 +42,6 @@ static struct pci_device_id pciidlist[] = { static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 7a1b210401e0..0b9ad8bbb031 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -51,7 +51,6 @@ static struct drm_driver driver = { .reclaim_buffers_locked = NULL, .reclaim_buffers_idlelocked = via_reclaim_buffers_locked, .lastclose = via_lastclose, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = via_ioctls, .fops = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 
72ec2e2b6e97..4a832de43dd5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -723,7 +723,6 @@ static struct drm_driver driver = { .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, .reclaim_buffers_locked = NULL, - .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7fd1870b6a70..70a14a4faa1e 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -772,7 +772,6 @@ struct drm_driver { struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct drm_file *file_priv); - resource_size_t (*get_map_ofs) (struct drm_local_map * map); resource_size_t (*get_reg_ofs) (struct drm_device *dev); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); @@ -1167,7 +1166,6 @@ extern int drm_release(struct inode *inode, struct file *filp); extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); extern void drm_vm_open_locked(struct vm_area_struct *vma); -extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); From 4ac5ec40ec70022e4dea8cc6254d2dadd1e43d57 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:34 +0200 Subject: [PATCH 010/476] drm: don't export dri1 locking functions Only used by ioctl, not by any in-tree drivers. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_lock.c | 10 +++------- include/drm/drmP.h | 1 - 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index d9146f240d33..1e28b9072068 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -37,6 +37,8 @@ static int drm_notifier(void *priv); +static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); + /** * Lock ioctl. * @@ -172,6 +174,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. */ +static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { @@ -208,7 +211,6 @@ int drm_lock_take(struct drm_lock_data *lock_data, } return 0; } -EXPORT_SYMBOL(drm_lock_take); /** * This takes a lock forcibly and hands it to context. 
Should ONLY be used @@ -276,7 +278,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) wake_up_interruptible(&lock_data->lock_queue); return 0; } -EXPORT_SYMBOL(drm_lock_free); /** * If we get here, it means that the process has called DRM_IOCTL_LOCK @@ -339,7 +340,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data) } spin_unlock_bh(&lock_data->spinlock); } -EXPORT_SYMBOL(drm_idlelock_take); void drm_idlelock_release(struct drm_lock_data *lock_data) { @@ -359,8 +359,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) } spin_unlock_bh(&lock_data->spinlock); } -EXPORT_SYMBOL(drm_idlelock_release); - int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { @@ -369,5 +367,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); } - -EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 70a14a4faa1e..45d09639e9d2 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1252,7 +1252,6 @@ extern int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); extern void drm_idlelock_take(struct drm_lock_data *lock_data); extern void drm_idlelock_release(struct drm_lock_data *lock_data); From 8f879194f88742d9c452f669482b6d6abdc1e1e7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:31 +0200 Subject: [PATCH 011/476] drm: replace drawable ioctl by noops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The information supplied by userspace through these ioctls is only accessible by dev->drw_idr. But there's no in-tree user of that. Also userspace does not really care about return values of these ioctls, either. Only hw/xfree86/dri/dri.c from the xserver actually checks the return from adddraw and keeps on trying to create a kernel drawable every time somebody creates a dri drawable. But since that's now a noop, who cares. Therefore it's safe to replace these three ioctls with noops and rip out the implementation. 
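For reference, the three ioctl table entries below now point at the DRM core's generic no-op handler, whose rough shape (see the core for the exact implementation) is simply:

	int drm_noop(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
	{
		DRM_DEBUG("\n");
		return 0;
	}

So userspace that only checks the ioctl return value, like the xserver's dri.c, keeps working unchanged.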
Signed-off-by: Daniel Vetter Reviewed-by: Kristian Høgsberg Reviewed-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_drawable.c | 197 --------------------------------- drivers/gpu/drm/drm_drv.c | 8 +- drivers/gpu/drm/drm_stub.c | 3 - include/drm/drmP.h | 15 --- 5 files changed, 4 insertions(+), 221 deletions(-) delete mode 100644 drivers/gpu/drm/drm_drawable.c diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index f3a23a329f4e..997c43d04909 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -5,7 +5,7 @@ ccflags-y := -Iinclude/drm drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ - drm_context.o drm_dma.o drm_drawable.o \ + drm_context.o drm_dma.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c deleted file mode 100644 index 170e53178d8b..000000000000 --- a/drivers/gpu/drm/drm_drawable.c +++ /dev/null @@ -1,197 +0,0 @@ -/** - * \file drm_drawable.c - * IOCTLs for drawables - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - * \author Michel Dänzer - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include "drmP.h" - -/** - * Allocate drawable ID and memory to store information about it. - */ -int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - unsigned long irqflags; - struct drm_draw *draw = data; - int new_id = 0; - int ret; - -again: - if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { - DRM_ERROR("Out of memory expanding drawable idr\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); - if (ret == -EAGAIN) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - goto again; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - draw->handle = new_id; - - DRM_DEBUG("%d\n", draw->handle); - - return 0; -} - -/** - * Free drawable ID and memory to store information about it. 
- */ -int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_draw *draw = data; - unsigned long irqflags; - struct drm_drawable_info *info; - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - info = drm_get_drawable_info(dev, draw->handle); - if (info == NULL) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - return -EINVAL; - } - kfree(info->rects); - kfree(info); - - idr_remove(&dev->drw_idr, draw->handle); - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("%d\n", draw->handle); - return 0; -} - -int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_update_draw *update = data; - unsigned long irqflags; - struct drm_clip_rect *rects; - struct drm_drawable_info *info; - int err; - - info = idr_find(&dev->drw_idr, update->handle); - if (!info) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { - DRM_ERROR("No such drawable %d\n", update->handle); - kfree(info); - return -EINVAL; - } - } - - switch (update->type) { - case DRM_DRAWABLE_CLIPRECTS: - if (update->num == 0) - rects = NULL; - else if (update->num != info->num_rects) { - rects = kmalloc(update->num * - sizeof(struct drm_clip_rect), - GFP_KERNEL); - } else - rects = info->rects; - - if (update->num && !rects) { - DRM_ERROR("Failed to allocate cliprect memory\n"); - err = -ENOMEM; - goto error; - } - - if (update->num && DRM_COPY_FROM_USER(rects, - (struct drm_clip_rect __user *) - (unsigned long)update->data, - update->num * - sizeof(*rects))) { - DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = -EFAULT; - goto error; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (rects != info->rects) { - kfree(info->rects); - } - - info->rects = rects; - info->num_rects = update->num; - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, update->handle); - break; - default: - DRM_ERROR("Invalid update type %d\n", update->type); - return -EINVAL; - } - - return 0; - -error: - if (rects != info->rects) - kfree(rects); - - return err; -} - -/** - * Caller must hold the drawable spinlock! 
- */ -static struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) -{ - return idr_find(&dev->drw_idr, id); -} - -static int drm_drawable_free(int idr, void *p, void *data) -{ - struct drm_drawable_info *info = p; - - if (info) { - kfree(info->rects); - kfree(info); - } - - return 0; -} - -void drm_drawable_free_all(struct drm_device *dev) -{ - idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); - idr_remove_all(&dev->drw_idr); -} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 84da748555bc..a35a41002c33 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), @@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), @@ -180,8 +180,6 @@ int drm_lastclose(struct drm_device * dev) mutex_lock(&dev->struct_mutex); - /* Free drawable information memory */ - drm_drawable_free_all(dev); del_timer(&dev->timer); /* Clear AGP information */ diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index d1ad57450df1..f797ae9da77c 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -240,14 +240,11 @@ int drm_fill_in_dev(struct drm_device *dev, INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->count_lock); - spin_lock_init(&dev->drw_lock); spin_lock_init(&dev->event_lock); init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); - idr_init(&dev->drw_idr); - if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 45d09639e9d2..989cefe33c7b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1037,12 +1037,6 @@ struct drm_device { struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ - /** \name Drawable information */ - /*@{ */ - spinlock_t drw_lock; - struct idr drw_idr; - /*@} */ - struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ @@ -1229,15 +1223,6 @@ extern int drm_setsareactx(struct drm_device *dev, void *data, extern int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); - /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_rmdraw(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_update_drawable_info(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern void 
drm_drawable_free_all(struct drm_device *dev); - /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); From 89c372647d1d698a96e2189ef4312a977b939839 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:36 +0200 Subject: [PATCH 012/476] drm: kill agp indirection mess There's no point in jumping through two indirections. So kill one and call the kernels agp functions directly. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_agpsupport.c | 40 ++++---------------------------- drivers/gpu/drm/drm_memory.c | 12 +++------- include/drm/drmP.h | 5 ---- 3 files changed, 7 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index ba38e0147220..252fdb98b73a 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data, * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the - * memory via alloc_agp() and creates a drm_agp_mem entry for it. + * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. */ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { @@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request->type; - if (!(memory = drm_alloc_agp(dev, pages, type))) { + if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { kfree(entry); return -ENOMEM; } @@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) return head; } -/** Calls agp_allocate_memory() */ -DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge, - size_t pages, u32 type) -{ - return agp_allocate_memory(bridge, pages, type); -} - -/** Calls agp_free_memory() */ -int drm_agp_free_memory(DRM_AGP_MEM * handle) -{ - if (!handle) - return 0; - agp_free_memory(handle); - return 1; -} - -/** Calls agp_bind_memory() */ -int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start) -{ - if (!handle) - return -EINVAL; - return agp_bind_memory(handle, start); -} - -/** Calls agp_unbind_memory() */ -int drm_agp_unbind_memory(DRM_AGP_MEM * handle) -{ - if (!handle) - return -EINVAL; - return agp_unbind_memory(handle); -} - /** * Binds a collection of pages into AGP memory at the given offset, returning * the AGP memory structure containing them. 
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev, DRM_DEBUG("\n"); - mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, + mem = agp_allocate_memory(dev->agp->bridge, num_pages, type); if (mem == NULL) { DRM_ERROR("Failed to allocate memory for %ld pages\n", @@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev, mem->page_count = num_pages; mem->is_flushed = true; - ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); + ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE); if (ret != 0) { DRM_ERROR("Failed to bind AGP memory: %d\n", ret); agp_free_memory(mem); diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 70ca27edc3c9..c9b805000a11 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size, return addr; } -/** Wrapper around agp_allocate_memory() */ -DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type) -{ - return drm_agp_allocate_memory(dev->agp->bridge, pages, type); -} - /** Wrapper around agp_free_memory() */ void drm_free_agp(DRM_AGP_MEM * handle, int pages) { - drm_agp_free_memory(handle); + agp_free_memory(handle); } EXPORT_SYMBOL(drm_free_agp); /** Wrapper around agp_bind_memory() */ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) { - return drm_agp_bind_memory(handle, start); + return agp_bind_memory(handle, start); } /** Wrapper around agp_unbind_memory() */ int drm_unbind_agp(DRM_AGP_MEM * handle) { - return drm_agp_unbind_memory(handle); + return agp_unbind_memory(handle); } EXPORT_SYMBOL(drm_unbind_agp); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 989cefe33c7b..ffe6035cf471 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1171,7 +1171,6 @@ extern int drm_mem_info(char *buf, char **start, off_t offset, extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); -extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, struct page **pages, @@ -1331,10 +1330,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); -extern int drm_agp_free_memory(DRM_AGP_MEM * handle); -extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); -extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); extern void drm_agp_chipset_flush(struct drm_device *dev); /* Stub support (drm_stub.h) */ From df8fcb09667c1b2c9dcf65de23f0bfa851e8138e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:32 +0200 Subject: [PATCH 013/476] drm: kill dev->timer Totally unused. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 2 -- drivers/gpu/drm/drm_stub.c | 1 - include/drm/drmP.h | 1 - 3 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a35a41002c33..5ff75a3a6b9d 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -180,8 +180,6 @@ int drm_lastclose(struct drm_device * dev) mutex_lock(&dev->struct_mutex); - del_timer(&dev->timer); - /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index f797ae9da77c..cdc89ee042cc 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -241,7 +241,6 @@ int drm_fill_in_dev(struct drm_device *dev, spin_lock_init(&dev->count_lock); spin_lock_init(&dev->event_lock); - init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index ffe6035cf471..757b63a23b14 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -964,7 +964,6 @@ struct drm_device { __volatile__ long context_flag; /**< Context swapping flag */ __volatile__ long interrupt_flag; /**< Interruption handler flag */ __volatile__ long dma_flag; /**< DMA dispatch flag */ - struct timer_list timer; /**< Timer for delaying ctx switch */ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ From cbc60ca04b342a4e1f2a1086a7277c077f07dbed Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 23 Aug 2010 22:53:28 +0200 Subject: [PATCH 014/476] drm: kill get_reg_ofs callback Every driver used the default implementation. Fold that one into the only callsite and drop the callback. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_vm.c | 6 ++---- drivers/gpu/drm/i810/i810_drv.c | 1 - drivers/gpu/drm/i830/i830_drv.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 1 - drivers/gpu/drm/mga/mga_drv.c | 1 - drivers/gpu/drm/nouveau/nouveau_drv.c | 1 - drivers/gpu/drm/r128/r128_drv.c | 1 - drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/savage/savage_drv.c | 1 - drivers/gpu/drm/sis/sis_drv.c | 1 - drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - drivers/gpu/drm/via/via_drv.c | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - include/drm/drmP.h | 2 -- 14 files changed, 2 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 2fea2e63a313..ee879d6bb522 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -515,7 +515,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) +static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) { #ifdef __alpha__ return dev->hose->dense_mem_base - dev->hose->mem_space->start; @@ -524,8 +524,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) #endif } -EXPORT_SYMBOL(drm_core_get_reg_ofs); - /** * mmap DMA memory. 
* @@ -612,7 +610,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) #endif case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: - offset = dev->driver->get_reg_ofs(dev); + offset = drm_core_get_reg_ofs(dev); vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); #if !defined(__arm__) diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 084a85c3d5d5..1c73b0c43c1e 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -52,7 +52,6 @@ static struct drm_driver driver = { .device_is_agp = i810_driver_device_is_agp, .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, .dma_quiescent = i810_driver_dma_quiescent, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = i810_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index 16352954311c..7140ffc12eee 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c @@ -57,7 +57,6 @@ static struct drm_driver driver = { .device_is_agp = i830_driver_device_is_agp, .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, .dma_quiescent = i830_driver_dma_quiescent, - .get_reg_ofs = drm_core_get_reg_ofs, #if USE_IRQS .irq_preinstall = i830_driver_irq_preinstall, .irq_postinstall = i830_driver_irq_postinstall, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d079f7b86cca..e6afa68775b0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -524,7 +524,6 @@ static struct drm_driver driver = { .irq_uninstall = i915_driver_irq_uninstall, .irq_handler = i915_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index e9c0cbc21fe2..65ea42cf1795 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -60,7 +60,6 @@ static struct drm_driver driver = { .irq_uninstall = mga_driver_irq_uninstall, .irq_handler = mga_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, .fops = { diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 0d64259b21cc..209912a1b7a5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -379,7 +379,6 @@ static struct drm_driver driver = { .irq_uninstall = nouveau_irq_uninstall, .irq_handler = nouveau_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = nouveau_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 42ec20a10eed..67309f84f16d 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -56,7 +56,6 @@ static struct drm_driver driver = { .irq_uninstall = r128_driver_irq_uninstall, .irq_handler = r128_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = r128_ioctls, .dma_ioctl = r128_cce_buffers, .fops = { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 8fd89bb8e610..663cdc10a5c2 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c 
@@ -203,7 +203,6 @@ static struct drm_driver driver_old = { .irq_uninstall = radeon_driver_irq_uninstall, .irq_handler = radeon_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls, .dma_ioctl = radeon_cp_buffers, .fops = { @@ -289,7 +288,6 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls_kms, .gem_init_object = radeon_gem_object_init, .gem_free_object = radeon_gem_object_free, diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index f539996ba230..c0385633667d 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -42,7 +42,6 @@ static struct drm_driver driver = { .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, .reclaim_buffers = savage_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = { diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 72b0a74f265f..4d9f311d249d 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -71,7 +71,6 @@ static struct drm_driver driver = { .reclaim_buffers = NULL, .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, .lastclose = sis_lastclose, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = sis_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 38a562647d6f..e0768adbeccd 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -42,7 +42,6 @@ static struct pci_device_id pciidlist[] = { static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, .reclaim_buffers = drm_core_reclaim_buffers, - .get_reg_ofs = drm_core_get_reg_ofs, .fops = { .owner = THIS_MODULE, .open = drm_open, diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 0b9ad8bbb031..02f733db61c1 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -51,7 +51,6 @@ static struct drm_driver driver = { .reclaim_buffers_locked = NULL, .reclaim_buffers_idlelocked = via_reclaim_buffers_locked, .lastclose = via_lastclose, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = via_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4a832de43dd5..e645f44e4302 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -723,7 +723,6 @@ static struct drm_driver driver = { .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, .reclaim_buffers_locked = NULL, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), .dma_quiescent = NULL, /*vmw_dma_quiescent, */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 757b63a23b14..30e827aeba02 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -772,7 +772,6 @@ struct drm_driver { struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct drm_file *file_priv); - resource_size_t (*get_reg_ofs) (struct drm_device *dev); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); @@ -1159,7 +1158,6 @@ extern int drm_release(struct inode *inode, struct file *filp); extern int 
drm_mmap(struct file *filp, struct vm_area_struct *vma); extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); extern void drm_vm_open_locked(struct vm_area_struct *vma); -extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Memory management support (drm_memory.h) */ From 24d05927c37adf62fe8833eceba50585cb78f906 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 18:08:28 +0200 Subject: [PATCH 015/476] drm/i915: unload: fix intel dp encoder cleanup struct intel_dp contains both struct intel_encoder at the beginning (as its base class) and an i2c adapter. When initializing, the i2c adapter gets assigned intel_encoder->ddc_adaptor = &intel_dp->adapter and the generic intel_encoder_destroy happily calls kfree on this pointer. Ouch. Fix this by using a dp-specific cleanup function. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 51d142939a26..b1fc65b41275 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1479,6 +1479,15 @@ intel_dp_destroy (struct drm_connector *connector) kfree(connector); } +static void intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + i2c_del_adapter(&intel_dp->adapter); + drm_encoder_cleanup(encoder); + kfree(intel_dp); +} + static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, @@ -1501,7 +1510,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { - .destroy = intel_encoder_destroy, + .destroy = intel_dp_encoder_destroy, }; void From bc0c7f14432f7f94b16f972f2d23b8c1248249b4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 18:18:48 +0200 Subject: [PATCH 016/476] drm/i915: unload: fix error_work races This is the first patch to clean up module unload races due to outstanding timers/work. Preparatory step: Thou shalt not destroy the workqueue when new work might still get enqueued. Now error_work gets queued by the hangcheck timer and only (atomically) reads the chip wedged status. So cancel it right after the hangcheck timer is killed. But the hangcheck is armed by interrupts, so move everything after irqs are disabled. Also change a del_timer to a del_timer_sync in the ums gem code; the hangcheck timer is self-rearming. 
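Condensed into one place, the resulting teardown order is the following sketch (not the literal hunks; the unrelated unload steps in between are omitted):

	static void example_error_work_teardown(struct drm_i915_private *dev_priv)
	{
		/* interrupts are already disabled here, so nothing re-arms
		 * the self-rearming hangcheck timer any more */
		del_timer_sync(&dev_priv->hangcheck_timer);
		/* per the above, error_work is queued from the hangcheck
		 * path, so nothing queues it any more; cancel what is left */
		cancel_work_sync(&dev_priv->error_work);
		/* destroy the workqueue only once nothing can queue onto it;
		 * in the patch this happens near the very end of unload */
		destroy_workqueue(dev_priv->wq);
	}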
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 8 +++++--- drivers/gpu/drm/i915/i915_gem.c | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9d67b4853030..736cca8a03d4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2256,9 +2256,6 @@ int i915_driver_unload(struct drm_device *dev) i915_mch_dev = NULL; spin_unlock(&mchdev_lock); - destroy_workqueue(dev_priv->wq); - del_timer_sync(&dev_priv->hangcheck_timer); - io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, @@ -2283,6 +2280,9 @@ int i915_driver_unload(struct drm_device *dev) vga_client_register(dev->pdev, NULL, NULL, NULL); } + del_timer_sync(&dev_priv->hangcheck_timer); + cancel_work_sync(&dev_priv->error_work); + if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); @@ -2307,6 +2307,8 @@ int i915_driver_unload(struct drm_device *dev) intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->wq); + pci_dev_put(dev_priv->bridge_dev); kfree(dev->dev_private); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 16fca1d1799a..4cccdce5f80f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4408,7 +4408,7 @@ i915_gem_idle(struct drm_device *dev) * And not confound mm.suspended! */ dev_priv->mm.suspended = 1; - del_timer(&dev_priv->hangcheck_timer); + del_timer_sync(&dev_priv->hangcheck_timer); i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); From 6c0d93500eb50098e4e35b8b79e073f2f2f5b773 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 18:26:46 +0200 Subject: [PATCH 017/476] drm/i915: unload: fix hotplug_work races hotplug_work is queued by the hotplug interrupt and only either emits a hotplug uevent or queues a crt poll slow-work. No other locking. So it's safe to cancel this work _after_ irq's have been turned off. But before the modesetting objects are destroyed because the hotplug function accesses them (without locking). The current code (for kms) only switches irqs off after modesetting teardown, hence move the irq teardown into the modeset cleanup right before the crtc cleanup. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 736cca8a03d4..45236e716669 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2275,7 +2275,7 @@ int i915_driver_unload(struct drm_device *dev) dev_priv->child_dev = NULL; dev_priv->child_dev_num = 0; } - drm_irq_uninstall(dev); + vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7c9103030036..20be935830b0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6073,6 +6073,11 @@ void intel_modeset_cleanup(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); + /* Disable the irq before mode object teardown, for the irq might + * enqueue unpin/hotplug work. 
*/ + drm_irq_uninstall(dev); + cancel_work_sync(&dev_priv->hotplug_work); + drm_mode_config_cleanup(dev); } From a8b4899e4658e53c0c8f4206af105e358e39ee93 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 21:25:11 +0200 Subject: [PATCH 018/476] drm/i915: unload: don't leak error state With kms, interrupts now get disabled in the modesetting cleanup. So free the error state afterwards, it currently gets allocated in the interrupt handler. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 45236e716669..970c338323a8 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2250,8 +2250,6 @@ int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - i915_destroy_error_state(dev); - spin_lock(&mchdev_lock); i915_mch_dev = NULL; spin_unlock(&mchdev_lock); @@ -2280,8 +2278,10 @@ int i915_driver_unload(struct drm_device *dev) vga_client_register(dev->pdev, NULL, NULL, NULL); } + /* Free error state after interrupts are fully disabled. */ del_timer_sync(&dev_priv->hangcheck_timer); cancel_work_sync(&dev_priv->error_work); + i915_destroy_error_state(dev); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); From 3dec0095f71e7d00b7f6180229fd32a2d0a6ce8d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 21:40:52 +0200 Subject: [PATCH 019/476] drm/i915: unload: fix idle_timer/idle_work races idle_work wasn't cleaned up at all. It takes &dev->struct_mutex, but accesss the mode_config crtc list (without any other locking!). Hence this work needs to be canceled before calling drm_mode_config_cleanup. As evidenced by the kernel's object debuggin code, the current code also cleans up the timer to early (it gets rearmed). So move it right before the final cleanup (it seems to work). Also unconditionally set up the idle_timer in intel_increase_pllclock. If we're unlucky the timer might fire right away, rendering the call in the modesetting teardown pointless. 
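Put differently, the teardown order this patch arrives at is: stop everything that can re-arm the timers, kill the timers, cancel the work, and only then free the objects that work touches. Condensed sketch (the wrapper name is invented; the calls and fields are the ones the patch moves around):

static void intel_idle_teardown_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* The per-crtc idle timers queue idle_work and get re-armed via
	 * intel_increase_pllclock(), so stop them only once nothing can
	 * re-arm them any more. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		del_timer_sync(&to_intel_crtc(crtc)->idle_timer);
	del_timer_sync(&dev_priv->idle_timer);

	/* idle_work takes struct_mutex but walks the crtc list without any
	 * modeset locking, so it must be gone before drm_mode_config_cleanup()
	 * frees those crtcs. */
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
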
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 20be935830b0..ccfc10559c17 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -43,7 +43,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); -static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); +static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc); typedef struct { @@ -1527,7 +1527,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_update_fbc(crtc, &crtc->mode); intel_wait_for_vblank(dev, intel_crtc->pipe); - intel_increase_pllclock(crtc, true); + intel_increase_pllclock(crtc); return 0; } @@ -4664,7 +4664,7 @@ static void intel_crtc_idle_timer(unsigned long arg) queue_work(dev_priv->wq, &dev_priv->idle_work); } -static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) +static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -4699,9 +4699,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) } /* Schedule downclock */ - if (schedule) - mod_timer(&intel_crtc->idle_timer, jiffies + - msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); } static void intel_decrease_pllclock(struct drm_crtc *crtc) @@ -4837,7 +4836,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); } /* Non-busy -> busy, upclock */ - intel_increase_pllclock(crtc, true); + intel_increase_pllclock(crtc); intel_crtc->busy = true; } else { /* Busy -> busy, put off timer */ @@ -6039,12 +6038,9 @@ void intel_modeset_cleanup(struct drm_device *dev) continue; intel_crtc = to_intel_crtc(crtc); - intel_increase_pllclock(crtc, false); - del_timer_sync(&intel_crtc->idle_timer); + intel_increase_pllclock(crtc); } - del_timer_sync(&dev_priv->idle_timer); - if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); @@ -6078,10 +6074,17 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_irq_uninstall(dev); cancel_work_sync(&dev_priv->hotplug_work); + /* Shut off idle work before the crtcs get freed. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + del_timer_sync(&intel_crtc->idle_timer); + } + del_timer_sync(&dev_priv->idle_timer); + cancel_work_sync(&dev_priv->idle_work); + drm_mode_config_cleanup(dev); } - /* * Return which encoder is currently attached for connector. */ From 67e77c5ae8bff6f805d207541f1315051248a87b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 22:26:30 +0200 Subject: [PATCH 020/476] drm/i915: unload: fix unpin_work related races Kill any outstanding unpin_work when destroying the corresponding crtc. Then flush the workqueue before the gem teardown, in case any unpin work is still outstanding. 
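The crtc-destroy half of the fix is the usual claim-under-lock idiom: detach the pending work pointer while holding the same spinlock the interrupt handler takes, so the handler can no longer reach it, then cancel and free it outside the lock because cancel_work_sync() may sleep. Annotated sketch of that sequence (same names as in the hunk below):

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;          /* irq handler now finds no work */
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);  /* may sleep, so outside the lock */
		kfree(work);                    /* we hold the last reference */
	}
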
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ drivers/gpu/drm/i915/intel_display.c | 14 ++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 970c338323a8..27a826e3170a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2292,6 +2292,9 @@ int i915_driver_unload(struct drm_device *dev) intel_opregion_free(dev, 0); if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Flush any outstanding unpin_work. */ + flush_workqueue(dev_priv->wq); + i915_gem_free_all_phys_object(dev); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ccfc10559c17..794d4ac0c40f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4850,8 +4850,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct intel_unpin_work *work; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (work) { + cancel_work_sync(&work->work); + kfree(work); + } drm_crtc_cleanup(crtc); + kfree(intel_crtc); } From c911fc1c6ad61b56869ee521f1a477c741b039da Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Aug 2010 21:23:20 +0200 Subject: [PATCH 021/476] drm/i915: unload: ensure that gem is idle When the module unloads, all users should be gone, hence all bo references held by userspace, too. This should already result in an idle ringbuffer. Still, be paranoid and idle gem before starting the unload dance. Also kill the call to i915_gem_lastclose under an if (kms), it's a noop for kms. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 27a826e3170a..14133ebef33b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2249,11 +2249,18 @@ free_priv: int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int ret; spin_lock(&mchdev_lock); i915_mch_dev = NULL; spin_unlock(&mchdev_lock); + mutex_lock(&dev->struct_mutex); + ret = i915_gpu_idle(dev); + if (ret) + DRM_ERROR("failed to idle hardware: %d\n", ret); + mutex_unlock(&dev->struct_mutex); + io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, @@ -2303,7 +2310,6 @@ int i915_driver_unload(struct drm_device *dev) if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); drm_mm_takedown(&dev_priv->vram); - i915_gem_lastclose(dev); intel_cleanup_overlay(dev); } From 75ef9da2cdb64e7926404dd2b755bbbfe98eaeaf Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 21 Aug 2010 00:25:16 +0200 Subject: [PATCH 022/476] drm/i915: unload: fix retire_work races ums-gem code correctly cancels the retire work (at lastclose time), kms does not do so. Fix this by canceling the work right after ideling the gpu. While staring at the code I noticed that the work function is not static. Fix this, too. 
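For context, mm.retire_work is a self-requeueing delayed work: while the GPU is busy its handler re-schedules itself, which is why cancel_delayed_work_sync() is used here and why it only happens after the GPU has been idled. Rough shape of such a handler (gpu_is_busy() is a made-up stand-in for the real per-ring request-list checks):

static void retire_work_sketch(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, mm.retire_work.work);

	/* ... retire the requests the GPU has completed ... */

	/* Re-arm while there is still outstanding work; this is the
	 * self-requeueing behaviour cancel_delayed_work_sync() has to stop. */
	if (gpu_is_busy(dev_priv))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
}
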
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 14133ebef33b..c58ec5c02919 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2261,6 +2261,9 @@ int i915_driver_unload(struct drm_device *dev) DRM_ERROR("failed to idle hardware: %d\n", ret); mutex_unlock(&dev->struct_mutex); + /* Cancel the retire work handler, which should be idle now. */ + cancel_delayed_work_sync(&dev_priv->mm.retire_work); + io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af4a263cf257..04aada08a6f9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -978,7 +978,6 @@ bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); void i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_retire_work_handler(struct work_struct *work); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4cccdce5f80f..26eb6e31c743 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1797,7 +1797,7 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); } -void +static void i915_gem_retire_work_handler(struct work_struct *work) { drm_i915_private_t *dev_priv; From 481b6af3d1f36d4a19bd36321c1e9f713db49aad Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Aug 2010 17:43:35 +0100 Subject: [PATCH 023/476] drm/i915: Drop the msleep parameter to wait_for() Jesse's feedback from using the wait_for() macro was that the msleep argument was that it was superfluous and made the macro more difficult to use and to read. As the actually amount of time to sleep is not critical, the crucial part is to sleep and let the processor schedule something else whilst we wait for the event, replace the argument with a hardcoded value. 
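For reference, a call such as wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000) now boils down to roughly the following loop (long-hand sketch of the macro expansion, not literal source; COND and MS stand for the two arguments):

	unsigned long timeout = jiffies + msecs_to_jiffies(MS);
	int ret = 0;

	while (!(COND)) {
		if (time_after(jiffies, timeout)) {
			ret = -ETIMEDOUT;   /* condition never became true */
			break;
		}
		msleep(1);                  /* hardcoded now; wait_for_atomic() skips the sleep */
	}
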
Signed-off-by: Chris Wilson Cc: Jesse Barnes --- drivers/gpu/drm/i915/intel_crt.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_dp.c | 4 ++-- drivers/gpu/drm/i915/intel_drv.h | 5 ++++- drivers/gpu/drm/i915/intel_lvds.c | 4 ++-- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4b7735196cd5..c2982e48b61f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -187,7 +187,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(PCH_ADPA, adpa); if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000, 1)) + 1000)) DRM_ERROR("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { @@ -244,7 +244,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) /* wait for FORCE_DETECT to go off */ if (wait_for((I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0, - 1000, 1)) + 1000)) DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 794d4ac0c40f..854d8f4415b7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1007,9 +1007,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); /* Wait for vblank interrupt bit to set */ - if (wait_for((I915_READ(pipestat_reg) & - PIPE_VBLANK_INTERRUPT_STATUS), - 50, 0)) + if (wait_for(I915_READ(pipestat_reg) & + PIPE_VBLANK_INTERRUPT_STATUS, + 50)) DRM_DEBUG_KMS("vblank wait timed out\n"); } @@ -1108,7 +1108,7 @@ void i8xx_disable_fbc(struct drm_device *dev) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); return; } @@ -2070,7 +2070,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(transconf_reg, temp | TRANS_ENABLE); I915_READ(transconf_reg); - if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) + if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) DRM_ERROR("failed to enable transcoder\n"); } @@ -2102,7 +2102,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50)) DRM_ERROR("failed to turn off cpu pipe\n"); } else DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); @@ -2160,7 +2160,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) + if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50)) DRM_ERROR("failed to disable transcoder\n"); } @@ -5521,7 +5521,7 @@ void ironlake_enable_drps(struct drm_device *dev) rgvmodectl |= MEMMODE_SWMODE_EN; I915_WRITE(MEMMODECTL, rgvmodectl); - if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) + if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) DRM_ERROR("stuck trying to change perf mode\n"); msleep(1); diff 
--git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b1fc65b41275..3449a3b89e7f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -772,7 +772,7 @@ static void ironlake_edp_panel_on (struct drm_device *dev) pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); - if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) + if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); @@ -797,7 +797,7 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); - if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) + if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000)) DRM_ERROR("panel off wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ad312ca6b3e5..686ed533dbe3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -33,7 +33,7 @@ #include "drm_crtc_helper.h" -#define wait_for(COND, MS, W) ({ \ +#define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ int ret__ = 0; \ while (! (COND)) { \ @@ -46,6 +46,9 @@ ret__; \ }) +#define wait_for(COND, MS) _wait_for(COND, MS, 1) +#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) + /* * Display related stuff */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4fbb0165b26f..fe79c5a2740c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -114,7 +114,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); - if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0)) + if (wait_for(I915_READ(status_reg) & PP_ON, 1000)) DRM_ERROR("timed out waiting to enable LVDS pipe"); intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); @@ -123,7 +123,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) + if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000)) DRM_ERROR("timed out waiting for LVDS pipe to turn off"); I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); From 82d7c9e7da9fa11b8ed968c94a19c7732e11c1ad Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 7 Aug 2010 12:16:25 +0100 Subject: [PATCH 024/476] drm/i915: Avoid using msleep under kdb and wait_for() wait_for() uses msleep() to yield the cpu whilst spinning waiting for a register to change. kdb asserts that mode changes are atomic and so prohibits msleep. The alternative would be to use mdelay or to simply probe the register more often instead of busy waiting. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 686ed533dbe3..1ca3c9e2667a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,7 +41,7 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W) msleep(W); \ + if (W && !in_dbg_master()) msleep(W); \ } \ ret__; \ }) From 19c55da11660fea1a0f1ddbb33ecf38d4f728799 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Aug 2010 14:50:53 +0100 Subject: [PATCH 025/476] drm/i915/crt: Flush register prior to waiting for vblank. 
If we don't flush the write then we can not be sure that the border colour will have taken effect by the time we try to read it back. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index c2982e48b61f..626279791b89 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -327,6 +327,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder if (IS_I9XX(dev)) { uint32_t pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); + POSTING_READ(pipeconf_reg); /* Wait for next Vblank to substitue * border color for Color info */ intel_wait_for_vblank(dev, pipe); From 3b61796785e7b0ca8846b7a709216dceb6e2f68d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 24 Aug 2010 09:02:58 +0100 Subject: [PATCH 026/476] drm/i915: Rename i915_opregion.c to intel_opregion.c It's part of the generic Intel driver infrastructure so rename it in prepreparation for using it for VBT. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/i915_drv.h | 14 +++++++------- drivers/gpu/drm/i915/i915_irq.c | 8 ++++---- .../drm/i915/{i915_opregion.c => intel_opregion.c} | 9 +++++---- 4 files changed, 17 insertions(+), 16 deletions(-) rename drivers/gpu/drm/i915/{i915_opregion.c => intel_opregion.c} (98%) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5c8e53458edb..345ca52d6e84 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -32,7 +32,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ dvo_tfp410.o \ dvo_sil164.o -i915-$(CONFIG_ACPI) += i915_opregion.o +i915-$(CONFIG_ACPI) += intel_opregion.o i915-$(CONFIG_COMPAT) += i915_ioc32.o obj-$(CONFIG_DRM_I915) += i915.o diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 04aada08a6f9..980061ff5c80 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1054,18 +1054,18 @@ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); #ifdef CONFIG_ACPI -/* i915_opregion.c */ +/* intel_opregion.c */ extern int intel_opregion_init(struct drm_device *dev, int resume); extern void intel_opregion_free(struct drm_device *dev, int suspend); -extern void opregion_asle_intr(struct drm_device *dev); -extern void ironlake_opregion_gse_intr(struct drm_device *dev); -extern void opregion_enable_asle(struct drm_device *dev); +extern void intel_opregion_asle_intr(struct drm_device *dev); +extern void intel_opregion_gse_intr(struct drm_device *dev); +extern void intel_opregion_enable_asle(struct drm_device *dev); #else static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } -static inline void opregion_asle_intr(struct drm_device *dev) { return; } -static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; } -static inline void opregion_enable_asle(struct drm_device *dev) { return; } +static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif /* modesetting */ diff --git a/drivers/gpu/drm/i915/i915_irq.c 
b/drivers/gpu/drm/i915/i915_irq.c index 59457e83b011..3afd6e5662d1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -347,7 +347,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) - ironlake_opregion_gse_intr(dev); + intel_opregion_gse_intr(dev); if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); @@ -1065,7 +1065,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (iir & I915_ASLE_INTERRUPT)) - opregion_asle_intr(dev); + intel_opregion_asle_intr(dev); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -1252,7 +1252,7 @@ void i915_enable_interrupt (struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (!HAS_PCH_SPLIT(dev)) - opregion_enable_asle(dev); + intel_opregion_enable_asle(dev); dev_priv->irq_enabled = 1; } @@ -1570,7 +1570,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); } - opregion_enable_asle(dev); + intel_opregion_enable_asle(dev); return 0; } diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c similarity index 98% rename from drivers/gpu/drm/i915/i915_opregion.c rename to drivers/gpu/drm/i915/intel_opregion.c index ea5d3fea4b61..97d5329079fe 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -211,7 +211,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) return 0; } -void opregion_asle_intr(struct drm_device *dev) +void intel_opregion_asle_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -273,7 +273,8 @@ static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) return 0; } -void ironlake_opregion_gse_intr(struct drm_device *dev) +/* Only present on Ironlake+ */ +void intel_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -315,7 +316,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev) #define ASLE_PFIT_EN (1<<2) #define ASLE_PFMB_EN (1<<3) -void opregion_enable_asle(struct drm_device *dev) +void intel_opregion_enable_asle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -510,7 +511,7 @@ int intel_opregion_init(struct drm_device *dev, int resume) if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; - opregion_enable_asle(dev); + intel_opregion_enable_asle(dev); } if (!resume) From 44834a67c0082e2cf74b16be91e49108b1432d65 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Aug 2010 16:09:23 +0100 Subject: [PATCH 027/476] drm/i915: Use the VBT from OpRegion when available (v3) It is recommended that we use the Video BIOS tables that were copied into the OpRegion during POST when initialising the driver. This saves us from having to furtle around inside the ROM ourselves and possibly allows the vBIOS to adjust the tables prior to initialisation. On some systems, such as the Samsung N210, there is no accessible VBIOS and the only means of finding the VBT is through the OpRegion. 
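Concretely, intel_init_bios() now tries the OpRegion copy first and only falls back to groping around in the ROM when that copy is missing or has a bad signature. Condensed sketch of the lookup order (error handling and the ROM scan trimmed; the full version is in the intel_bios.c hunk below):

	struct bdb_header *bdb = NULL;
	u8 __iomem *bios = NULL;

	/* Prefer the VBT the BIOS copied into the OpRegion during POST. */
	if (dev_priv->opregion.vbt) {
		struct vbt_header *vbt = dev_priv->opregion.vbt;

		if (memcmp(vbt->signature, "$VBT", 4) == 0)
			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
		else
			dev_priv->opregion.vbt = NULL;  /* bad signature, ignore it */
	}

	/* Otherwise scan the PCI ROM for the "$VBT" signature as before. */
	if (bdb == NULL) {
		size_t size;

		bios = pci_map_rom(pdev, &size);
		/* ... locate the "$VBT" signature in the ROM image and compute bdb ... */
	}
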
v2: Rearrange the code so that ASLE is enabled along with ACPI v3: Enable OpRegion parsing even without ACPI Signed-off-by: Chris Wilson Cc: Matthew Garrett --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 21 +++++ drivers/gpu/drm/i915/i915_dma.c | 9 +- drivers/gpu/drm/i915/i915_drv.c | 7 +- drivers/gpu/drm/i915/i915_drv.h | 14 ++-- drivers/gpu/drm/i915/intel_bios.c | 69 +++++++++------- drivers/gpu/drm/i915/intel_opregion.c | 113 +++++++++++++------------- 7 files changed, 139 insertions(+), 96 deletions(-) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 345ca52d6e84..f6e98dd416c9 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -26,13 +26,13 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_dvo.o \ intel_ringbuffer.o \ intel_overlay.o \ + intel_opregion.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ dvo_ivch.o \ dvo_tfp410.o \ dvo_sil164.o -i915-$(CONFIG_ACPI) += intel_opregion.o i915-$(CONFIG_COMPAT) += i915_ioc32.o obj-$(CONFIG_DRM_I915) += i915.o diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5e43d7076789..16133f10ffaa 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -718,6 +718,26 @@ static int i915_gfxec(struct seq_file *m, void *unused) return 0; } +static int i915_opregion(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (opregion->header) + seq_write(m, opregion->header, OPREGION_SIZE); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -845,6 +865,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_sr_status", i915_sr_status, 0}, + {"i915_opregion", i915_opregion, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c58ec5c02919..2dae3be9ebef 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -40,6 +40,7 @@ #include #include #include +#include extern int intel_max_stolen; /* from AGP driver */ @@ -2166,6 +2167,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); + intel_opregion_setup(dev); i915_gem_load(dev); @@ -2221,7 +2223,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } /* Must be done after probing outputs */ - intel_opregion_init(dev, 0); + intel_opregion_init(dev); + acpi_video_register(); setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); @@ -2271,6 +2274,8 @@ int i915_driver_unload(struct drm_device *dev) dev_priv->mm.gtt_mtrr = -1; } + acpi_video_unregister(); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_modeset_cleanup(dev); @@ -2299,7 +2304,7 @@ int i915_driver_unload(struct drm_device *dev) if (dev_priv->regs != NULL) iounmap(dev_priv->regs); - intel_opregion_free(dev, 0); + intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { /* Flush any outstanding unpin_work. 
*/ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ba75255ec450..2879a768d65c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -236,7 +236,7 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); - intel_opregion_free(dev, 1); + intel_opregion_fini(dev); /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; @@ -276,8 +276,7 @@ static int i915_drm_thaw(struct drm_device *dev) int error = 0; i915_restore_state(dev); - - intel_opregion_init(dev, 1); + intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -293,6 +292,8 @@ static int i915_drm_thaw(struct drm_device *dev) drm_helper_resume_force_mode(dev); } + intel_opregion_init(dev); + dev_priv->modeset_on_lid = 0; return error; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 980061ff5c80..f6940f1b1286 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -110,8 +110,9 @@ struct intel_opregion { struct opregion_acpi *acpi; struct opregion_swsci *swsci; struct opregion_asle *asle; - int enabled; + void *vbt; }; +#define OPREGION_SIZE (8*1024) struct intel_overlay; struct intel_overlay_error_state; @@ -1053,16 +1054,17 @@ extern int i915_restore_state(struct drm_device *dev); extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); -#ifdef CONFIG_ACPI /* intel_opregion.c */ -extern int intel_opregion_init(struct drm_device *dev, int resume); -extern void intel_opregion_free(struct drm_device *dev, int suspend); +extern int intel_opregion_setup(struct drm_device *dev); +#ifdef CONFIG_ACPI +extern void intel_opregion_init(struct drm_device *dev); +extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_device *dev); extern void intel_opregion_gse_intr(struct drm_device *dev); extern void intel_opregion_enable_asle(struct drm_device *dev); #else -static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } -static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } +static inline void intel_opregion_init(struct drm_device *dev) { return; } +static inline void intel_opregion_fini(struct drm_device *dev) { return; } static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 96f75d7f6633..8d7deca69830 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -317,7 +317,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, - struct bdb_header *bdb) + struct bdb_header *bdb) { struct sdvo_device_mapping *p_mapping; struct bdb_general_definitions *p_defs; @@ -327,7 +327,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { - DRM_DEBUG_KMS("No general definition block is found\n"); + DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } /* judge whether the size of child device meets the requirements. 
@@ -460,7 +460,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { - DRM_DEBUG_KMS("No general definition block is found\n"); + DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } /* judge whether the size of child device meets the requirements. @@ -513,6 +513,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } return; } + /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -520,11 +521,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv, * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. * - * VBT existence is a sanity check that is relied on by other i830_bios.c code. - * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may - * feed an updated VBT back through that, compared to what we'll fetch using - * this method of groping around in the BIOS data. - * * Returns 0 on success, nonzero on failure. */ bool @@ -532,31 +528,45 @@ intel_init_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; - struct vbt_header *vbt = NULL; - struct bdb_header *bdb; - u8 __iomem *bios; - size_t size; - int i; + struct bdb_header *bdb = NULL; + u8 __iomem *bios = NULL; - bios = pci_map_rom(pdev, &size); - if (!bios) - return -1; + /* XXX Should this validation be moved to intel_opregion.c? */ + if (dev_priv->opregion.vbt) { + struct vbt_header *vbt = dev_priv->opregion.vbt; + if (memcmp(vbt->signature, "$VBT", 4) == 0) { + DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", + vbt->signature); + bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); + } else + dev_priv->opregion.vbt = NULL; + } - /* Scour memory looking for the VBT signature */ - for (i = 0; i + 4 < size; i++) { - if (!memcmp(bios + i, "$VBT", 4)) { - vbt = (struct vbt_header *)(bios + i); - break; + if (bdb == NULL) { + struct vbt_header *vbt = NULL; + size_t size; + int i; + + bios = pci_map_rom(pdev, &size); + if (!bios) + return -1; + + /* Scour memory looking for the VBT signature */ + for (i = 0; i + 4 < size; i++) { + if (!memcmp(bios + i, "$VBT", 4)) { + vbt = (struct vbt_header *)(bios + i); + break; + } } - } - if (!vbt) { - DRM_ERROR("VBT signature missing\n"); - pci_unmap_rom(pdev, bios); - return -1; - } + if (!vbt) { + DRM_ERROR("VBT signature missing\n"); + pci_unmap_rom(pdev, bios); + return -1; + } - bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); + bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); + } /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); @@ -568,7 +578,8 @@ intel_init_bios(struct drm_device *dev) parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); - pci_unmap_rom(pdev, bios); + if (bios) + pci_unmap_rom(pdev, bios); return 0; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 97d5329079fe..3cb13237ba58 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -36,12 +36,11 @@ #define PCI_LBPC 0xf4 #define PCI_ASLS 0xfc -#define OPREGION_SZ (8*1024) #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 -#define OPREGION_VBT_OFFSET 0x1000 +#define OPREGION_VBT_OFFSET 0x400 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) @@ -143,6 +142,7 @@ struct opregion_asle 
{ #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) +#ifdef CONFIG_ACPI static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -465,7 +465,58 @@ blind_set: goto end; } -int intel_opregion_init(struct drm_device *dev, int resume) +void intel_opregion_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_didl_outputs(dev); + + /* Notify BIOS we are ready to handle ACPI video ext notifs. + * Right now, all the events are handled by the ACPI video module. + * We don't actually need to do anything with them. */ + opregion->acpi->csts = 0; + opregion->acpi->drdy = 1; + + system_opregion = opregion; + register_acpi_notifier(&intel_opregion_notifier); + } + + if (opregion->asle) + intel_opregion_enable_asle(dev); +} + +void intel_opregion_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + opregion->acpi->drdy = 0; + + system_opregion = NULL; + unregister_acpi_notifier(&intel_opregion_notifier); + } + + /* just clear all opregion memory pointers now */ + iounmap(opregion->header); + opregion->header = NULL; + opregion->acpi = NULL; + opregion->swsci = NULL; + opregion->asle = NULL; + opregion->vbt = NULL; +} +#endif + +int intel_opregion_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; @@ -480,29 +531,23 @@ int intel_opregion_init(struct drm_device *dev, int resume) return -ENOTSUPP; } - base = ioremap(asls, OPREGION_SZ); + base = ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; - opregion->header = base; - if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { + if (memcmp(base, OPREGION_SIGNATURE, 16)) { DRM_DEBUG_DRIVER("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } + opregion->header = base; + opregion->vbt = base + OPREGION_VBT_OFFSET; mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_didl_outputs(dev); - } else { - DRM_DEBUG_DRIVER("Public ACPI methods not supported\n"); - err = -ENOTSUPP; - goto err_out; } - opregion->enabled = 1; if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); @@ -511,53 +556,11 @@ int intel_opregion_init(struct drm_device *dev, int resume) if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; - intel_opregion_enable_asle(dev); } - if (!resume) - acpi_video_register(); - - - /* Notify BIOS we are ready to handle ACPI video ext notifs. - * Right now, all the events are handled by the ACPI video module. - * We don't actually need to do anything with them. 
*/ - opregion->acpi->csts = 0; - opregion->acpi->drdy = 1; - - system_opregion = opregion; - register_acpi_notifier(&intel_opregion_notifier); - return 0; err_out: iounmap(opregion->header); - opregion->header = NULL; - acpi_video_register(); return err; } - -void intel_opregion_free(struct drm_device *dev, int suspend) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_opregion *opregion = &dev_priv->opregion; - - if (!opregion->enabled) - return; - - if (!suspend) - acpi_video_unregister(); - - opregion->acpi->drdy = 0; - - system_opregion = NULL; - unregister_acpi_notifier(&intel_opregion_notifier); - - /* just clear all opregion memory pointers now */ - iounmap(opregion->header); - opregion->header = NULL; - opregion->acpi = NULL; - opregion->swsci = NULL; - opregion->asle = NULL; - - opregion->enabled = 0; -} From 425904dd8a86d9ca3a3be38eaaa12b4844dceed6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 22 Aug 2010 18:21:42 +0100 Subject: [PATCH 028/476] drm/i915: Addin-offset is an unreliable indicator of LVDS presence (v2) My Samsung N210 has a VBT with DEVICE_TYPE_INT_LFP with a zero addin-offset. With the check in place, the panel was declared absent. v2: Only trust BIOS writers that have graduated to writing OpRegions. (We are all doomed.) Signed-off-by: Chris Wilson Cc: Zhao Yakui Cc: Adam Jackson Reviewed-by: Adam Jackson --- drivers/gpu/drm/i915/intel_lvds.c | 46 +++++++++++++++++-------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index fe79c5a2740c..047bd9538c6a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -777,38 +777,44 @@ static void intel_find_lvds_downclock(struct drm_device *dev, * If it is present, return 1. * If it is not present, return false. * If no child dev is parsed from VBT, it assumes that the LVDS is present. - * Note: The addin_offset should also be checked for LVDS panel. - * Only when it is non-zero, it is assumed that it is present. */ -static int lvds_is_present_in_vbt(struct drm_device *dev) +static bool lvds_is_present_in_vbt(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct child_device_config *p_child; - int i, ret; + int i; if (!dev_priv->child_dev_num) - return 1; + return true; - ret = 0; for (i = 0; i < dev_priv->child_dev_num; i++) { - p_child = dev_priv->child_dev + i; - /* - * If the device type is not LFP, continue. - * If the device type is 0x22, it is also regarded as LFP. + struct child_device_config *child = dev_priv->child_dev + i; + + /* If the device type is not LFP, continue. + * We have to check both the new identifiers as well as the + * old for compatibility with some BIOSes. */ - if (p_child->device_type != DEVICE_TYPE_INT_LFP && - p_child->device_type != DEVICE_TYPE_LFP) + if (child->device_type != DEVICE_TYPE_INT_LFP && + child->device_type != DEVICE_TYPE_LFP) continue; - /* The addin_offset should be checked. Only when it is - * non-zero, it is regarded as present. + /* However, we cannot trust the BIOS writers to populate + * the VBT correctly. Since LVDS requires additional + * information from AIM blocks, a non-zero addin offset is + * a good indicator that the LVDS is actually present. 
*/ - if (p_child->addin_offset) { - ret = 1; - break; - } + if (child->addin_offset) + return true; + + /* But even then some BIOS writers perform some black magic + * and instantiate the device without reference to any + * additional data. Trust that if the VBT was written into + * the OpRegion then they have validated the LVDS's existence. + */ + if (dev_priv->opregion.vbt) + return true; } - return ret; + + return false; } /** From 2bbda389632dd810d80c055fc1ec90827a16f687 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Sep 2010 17:59:39 +0100 Subject: [PATCH 029/476] drm/i915: Ironlake page-flipping is per-plane not per-pipe Fix a minor confusion between intel_page_flip_finish(pipe) and intel_page_flip_finish_plane(plane) -- should have no effect as currently we map pipe 0 to plane 0 (and pipe 1 to plane 1). Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3afd6e5662d1..e797157f4bb9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -351,12 +351,12 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); - intel_finish_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); } if (de_iir & DE_PLANEB_FLIP_DONE) { intel_prepare_page_flip(dev, 1); - intel_finish_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); } if (de_iir & DE_PIPEA_VBLANK) From efe8c25680fcd3548142f956dcd02d5fdaf3f159 Mon Sep 17 00:00:00 2001 From: Sitsofe Wheeler Date: Tue, 24 Aug 2010 16:56:16 +0100 Subject: [PATCH 030/476] drm/i915: Revert extra intel_wait_for_vblank to prevent stalls. With the extra intel_wait_for_vblank added in commit 9d0498a2bf7455159b317f19531a3e5db2ecc9c4 periodic stalls were being triggered (which were detected by i915_hangcheck_elapsed). Partially revert this change for now. Signed-off-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 854d8f4415b7..3a03c62496d3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2328,8 +2328,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(dspbase_reg); } - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank_off(dev, pipe); + if (!IS_I9XX(dev)) { + /* Wait for vblank for the disable to take effect */ + intel_wait_for_vblank_off(dev, pipe); + } /* Don't disable pipe A or pipe A PLLs if needed */ if (pipeconf_reg == PIPEACONF && From b8ed2a4f12870bf2ea9c07ff83ccd9d8b6abc2c6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Sep 2010 00:43:42 +0100 Subject: [PATCH 031/476] drm/i915/tv: Preserve reserved DAC bits during mode-setting Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_tv.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d094e9129223..e240de9eed57 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1481,6 +1481,7 @@ # define TV_TEST_MODE_MASK (7 << 0) #define TV_DAC 0x68004 +# define TV_DAC_SAVE 0x00ffff00 /** * Reports that DAC state change logic has reported change (RO). 
* diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index c671f60ce80b..fc5c6f2008fb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1196,7 +1196,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); - I915_WRITE(TV_DAC, 0); + I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); I915_WRITE(TV_CTL, tv_ctl); } From 974b93315b2213b74a42a87e8a9d4fc8c0dbe90c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Sep 2010 00:44:20 +0100 Subject: [PATCH 032/476] drm/i915/tv: Poll for DAC state change Instead of sleeping for an arbitrary length of time (the documentation fails to specify how long to wait for) wait until the load detection has changed state (or at most the 20ms as before). Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_tv.c | 73 +++++++++++++++------------------ 1 file changed, 33 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index fc5c6f2008fb..b7f4dca186a9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1234,9 +1234,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv) unsigned long irqflags; u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; - int type = DRM_MODE_CONNECTOR_Unknown; - - tv_dac = I915_READ(TV_DAC); + int type; /* Disable TV interrupts around load detect or we'll recurse */ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); @@ -1244,19 +1242,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv) PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); - /* - * Detect TV by polling) - */ - save_tv_dac = tv_dac; - tv_ctl = I915_READ(TV_CTL); - save_tv_ctl = tv_ctl; - tv_ctl &= ~TV_ENC_ENABLE; - tv_ctl &= ~TV_TEST_MODE_MASK; + save_tv_dac = tv_dac = I915_READ(TV_DAC); + save_tv_ctl = tv_ctl = I915_READ(TV_CTL); + + /* Poll for TV detection */ + tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - tv_dac &= ~TVDAC_SENSE_MASK; - tv_dac &= ~DAC_A_MASK; - tv_dac &= ~DAC_B_MASK; - tv_dac &= ~DAC_C_MASK; + + tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | @@ -1265,37 +1258,37 @@ intel_tv_detect_type (struct intel_tv *intel_tv) DAC_A_0_7_V | DAC_B_0_7_V | DAC_C_0_7_V); + I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); - msleep(20); - tv_dac = I915_READ(TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac); - I915_WRITE(TV_CTL, save_tv_ctl); - POSTING_READ(TV_CTL); - msleep(20); - - /* - * A B C - * 0 1 1 Composite - * 1 0 X svideo - * 0 0 0 Component - */ - if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { - DRM_DEBUG_KMS("Detected Composite TV connection\n"); - type = DRM_MODE_CONNECTOR_Composite; - } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { - DRM_DEBUG_KMS("Detected S-Video TV connection\n"); - type = DRM_MODE_CONNECTOR_SVIDEO; - } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { - DRM_DEBUG_KMS("Detected Component TV connection\n"); - type = DRM_MODE_CONNECTOR_Component; - } else { - DRM_DEBUG_KMS("No TV connection detected\n"); - type = -1; + type = -1; + if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { + /* + * A B C + * 0 1 1 Composite + * 1 0 X svideo + * 
0 0 0 Component + */ + if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { + DRM_DEBUG_KMS("Detected Composite TV connection\n"); + type = DRM_MODE_CONNECTOR_Composite; + } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { + DRM_DEBUG_KMS("Detected S-Video TV connection\n"); + type = DRM_MODE_CONNECTOR_SVIDEO; + } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { + DRM_DEBUG_KMS("Detected Component TV connection\n"); + type = DRM_MODE_CONNECTOR_Component; + } else { + DRM_DEBUG_KMS("Unrecognised TV connection: %x\n", + tv_dac); + } } + I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + I915_WRITE(TV_CTL, save_tv_ctl); + /* Restore interrupt config */ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | From 763a4a019105dccdcd44883f1712571ae8ea8f1f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Sep 2010 00:52:34 +0100 Subject: [PATCH 033/476] drm/i915/tv: Mark the format names as constant and so avoid the memleak Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_tv.c | 36 ++++++++++++++++----------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b7f4dca186a9..4a6534239fa3 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -48,7 +48,7 @@ struct intel_tv { struct intel_encoder base; int type; - char *tv_format; + const char *tv_format; int margin[4]; u32 save_TV_H_CTL_1; u32 save_TV_H_CTL_2; @@ -350,7 +350,7 @@ static const struct video_levels component_levels = { struct tv_mode { - char *name; + const char *name; int clock; int refresh; /* in millihertz (for precision) */ u32 oversample; @@ -922,7 +922,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) } static const struct tv_mode * -intel_tv_mode_lookup (char *tv_format) +intel_tv_mode_lookup(const char *tv_format) { int i; @@ -936,13 +936,14 @@ intel_tv_mode_lookup (char *tv_format) } static const struct tv_mode * -intel_tv_mode_find (struct intel_tv *intel_tv) +intel_tv_mode_find(struct intel_tv *intel_tv) { return intel_tv_mode_lookup(intel_tv->tv_format); } static enum drm_mode_status -intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) +intel_tv_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_tv *intel_tv = enc_to_intel_tv(encoder); @@ -952,6 +953,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000) return MODE_OK; + return MODE_CLOCK_RANGE; } @@ -1369,11 +1371,10 @@ intel_tv_detect(struct drm_connector *connector) return connector_status_connected; } -static struct input_res { - char *name; +static const struct input_res { + const char *name; int w, h; -} input_res_table[] = -{ +} input_res_table[] = { {"640x480", 640, 480}, {"800x600", 800, 600}, {"1024x768", 1024, 768}, @@ -1424,7 +1425,7 @@ intel_tv_get_modes(struct drm_connector *connector) for (j = 0; j < ARRAY_SIZE(input_res_table); j++) { - struct input_res *input = &input_res_table[j]; + const struct input_res *input = &input_res_table[j]; unsigned int hactive_s = input->w; unsigned int vactive_s = input->h; @@ -1601,7 +1602,7 @@ intel_tv_init(struct drm_device *dev) struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; u32 tv_dac_on, tv_dac_off, 
save_tv_dac; - char **tv_format_names; + char *tv_format_names[ARRAY_SIZE(tv_modes)]; int i, initial_mode = 0; if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) @@ -1672,7 +1673,7 @@ intel_tv_init(struct drm_device *dev) intel_tv->margin[TV_MARGIN_RIGHT] = 46; intel_tv->margin[TV_MARGIN_BOTTOM] = 37; - intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); + intel_tv->tv_format = tv_modes[initial_mode].name; drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); @@ -1680,13 +1681,11 @@ intel_tv_init(struct drm_device *dev) connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), - GFP_KERNEL); - if (!tv_format_names) - goto out; for (i = 0; i < ARRAY_SIZE(tv_modes); i++) - tv_format_names[i] = tv_modes[i].name; - drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); + tv_format_names[i] = (char *)tv_modes[i].name; + drm_mode_create_tv_properties(dev, + ARRAY_SIZE(tv_modes), + tv_format_names); drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); @@ -1702,6 +1701,5 @@ intel_tv_init(struct drm_device *dev) drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); -out: drm_sysfs_connector_add(connector); } From 57cd6508da65adabcb14be6ba3b9370d750b647d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 8 Aug 2010 12:34:44 +0100 Subject: [PATCH 034/476] drm/i915: Sanity check user framebuffer parameters on creation Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3a03c62496d3..adce19304eee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5371,8 +5371,25 @@ int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; + if (obj_priv->tiling_mode == I915_TILING_Y) + return -EINVAL; + + if (mode_cmd->pitch & 63) + return -EINVAL; + + switch (mode_cmd->bpp) { + case 8: + case 16: + case 24: + case 32: + break; + default: + return -EINVAL; + } + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); From 6c9547ff354d867318d78094aa8e9cf5218851e2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Aug 2010 10:05:17 +0100 Subject: [PATCH 035/476] drm/i915/sdvo: Preserve pixel-multiplier Store the pixel-multiplier on the adjusted mode and avoid modifying the requested mode. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 25 ++++--- drivers/gpu/drm/i915/intel_drv.h | 18 +++++ drivers/gpu/drm/i915/intel_sdvo.c | 99 ++++++++++++---------------- 3 files changed, 75 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index adce19304eee..120a9c0c2da6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3519,7 +3519,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int trans_dpll_sel = (pipe == 0) ? 
0 : 1; int lvds_reg = LVDS; u32 temp; - int sdvo_pixel_multiply; int target_clock; drm_vblank_pre_modeset(dev, pipe); @@ -3770,12 +3769,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + if (pixel_multiplier > 1) { + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; + else if (HAS_PCH_SPLIT(dev)) + dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; + } dpll |= DPLL_DVO_HIGH_SPEED; - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - else if (HAS_PCH_SPLIT(dev)) - dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } if (is_dp) dpll |= DPLL_DVO_HIGH_SPEED; @@ -3982,9 +3983,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { if (is_sdvo) { - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | - ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + if (pixel_multiplier > 1) + pixel_multiplier = (pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + else + pixel_multiplier = 0; + + I915_WRITE(dpll_md_reg, + (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | + pixel_multiplier); } else I915_WRITE(dpll_md_reg, 0); } else { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1ca3c9e2667a..64a7c87817d7 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -99,6 +99,24 @@ #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 +/* drm_display_mode->private_flags */ +#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) +#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) + +static inline void +intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, + int multiplier) +{ + mode->clock *= multiplier; + mode->private_flags |= multiplier; +} + +static inline int +intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) +{ + return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; +} + struct intel_i2c_chan { struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ u32 reg; /* GPIO reg */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e3b7a7ee39cb..1c1aeea81e56 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -106,15 +106,11 @@ struct intel_sdvo { bool is_hdmi; /** - * This is set if we detect output of sdvo device as LVDS. + * This is set if we detect output of sdvo device as LVDS and + * have a valid fixed mode to use with the panel. */ bool is_lvds; - /** - * This is sdvo flags for input timing. 
- */ - uint8_t sdvo_flags; - /** * This is sdvo fixed pannel mode pointer */ @@ -132,6 +128,8 @@ struct intel_sdvo { /* Mac mini hack -- use the same DDC as the analog connector */ struct i2c_adapter *analog_ddc_bus; + /* Input timings for adjusted_mode */ + struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { @@ -1022,8 +1020,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_sdvo_dtd input_dtd; - /* Reset the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; @@ -1035,14 +1031,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, - &input_dtd)) + &intel_sdvo->input_dtd)) return false; - intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); - intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags; + intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); drm_mode_set_crtcinfo(adjusted_mode, 0); - mode->clock = adjusted_mode->clock; return true; } @@ -1051,6 +1045,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + int multiplier; /* We need to construct preferred input timings based on our * output timings. To do that, we have to set the output @@ -1065,10 +1060,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, mode, adjusted_mode); } else if (intel_sdvo->is_lvds) { - drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); - if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, - intel_sdvo->sdvo_lvds_fixed_mode)) + intel_sdvo->sdvo_lvds_fixed_mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, @@ -1077,9 +1070,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, } /* Make the CRTC code factor in the SDVO pixel multiplier. The - * SDVO device will be told of the multiplier during mode_set. + * SDVO device will factor out the multiplier during mode_set. 
*/ - adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); + multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); + intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); return true; } @@ -1093,10 +1087,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - u32 sdvox = 0; - int sdvo_pixel_multiply, rate; + u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd; + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + int rate; if (!mode) return; @@ -1114,28 +1109,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); - if (intel_sdvo->is_hdmi) { - if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) - return; - - sdvox |= SDVO_AUDIO_ENABLE; - } + /* Set the output timings to the screen */ + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return; /* We have tried to get input timing in mode_fixup, and filled into - adjusted_mode */ - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - if (intel_sdvo->is_tv || intel_sdvo->is_lvds) - input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; - - /* If it's a TV, we already set the output timing in mode_fixup. - * Otherwise, the output timing is equal to the input timing. + * adjusted_mode. */ - if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { + input_dtd = intel_sdvo->input_dtd; + } else { /* Set the output timing to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); } @@ -1143,31 +1133,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (!intel_sdvo_set_target_input(intel_sdvo)) return; - if (intel_sdvo->is_tv) { - if (!intel_sdvo_set_tv_format(intel_sdvo)) - return; - } + if (intel_sdvo->is_hdmi && + !intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) + return; - /* We would like to use intel_sdvo_create_preferred_input_timing() to - * provide the device with a timing it can support, if it supports that - * feature. However, presumably we would need to adjust the CRTC to - * output the preferred timing, and we don't support that currently. - */ -#if 0 - success = intel_sdvo_create_preferred_input_timing(encoder, clock, - width, height); - if (success) { - struct intel_sdvo_dtd *input_dtd; + if (intel_sdvo->is_tv && + !intel_sdvo_set_tv_format(intel_sdvo)) + return; - intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); - intel_sdvo_set_input_timing(encoder, &input_dtd); - } -#else (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); -#endif - sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); - switch (sdvo_pixel_multiply) { + switch (pixel_multiplier) { + default: case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -1177,13 +1154,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. 
*/ if (IS_I965G(dev)) { - sdvox |= SDVO_BORDER_ENABLE; + sdvox = SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { - sdvox |= I915_READ(intel_sdvo->sdvo_reg); + sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; @@ -1196,16 +1173,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; + if (intel_sdvo->is_hdmi) + sdvox |= SDVO_AUDIO_ENABLE; if (IS_I965G(dev)) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { - sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; + sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } - if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) + if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } @@ -1692,6 +1671,10 @@ end: if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); + + drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, + 0); + intel_sdvo->is_lvds = true; break; } From be282fd48e7492812402a22d73a348c44bf95b63 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 13 Aug 2010 15:50:28 -0700 Subject: [PATCH 036/476] drm/i915: add MMIO debug output Useful for capturing register read/write traces to send to the hw guys. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f6940f1b1286..2692410bd605 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -264,6 +264,9 @@ typedef struct drm_i915_private { int front_offset; int current_page; int page_flipping; +#define I915_DEBUG_READ (1<<0) +#define I915_DEBUG_WRITE (1<<1) + unsigned long debug_flags; wait_queue_head_t irq_queue; atomic_t irq_received; @@ -1100,8 +1103,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) -#define I915_READ(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) +static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) +{ + u32 val; + + val = readl(dev_priv->regs + reg); + if (dev_priv->debug_flags & I915_DEBUG_READ) + printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); + return val; +} + +static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, + u32 val) +{ + writel(val, dev_priv->regs + reg); + if (dev_priv->debug_flags & I915_DEBUG_WRITE) + printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); +} + +#define I915_READ(reg) i915_read(dev_priv, (reg)) +#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val)) #define I915_READ16(reg) readw(dev_priv->regs + (reg)) #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ8(reg) readb(dev_priv->regs + (reg)) @@ -1111,6 +1132,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove #define POSTING_READ(reg) (void)I915_READ(reg) #define POSTING_READ16(reg) (void)I915_READ16(reg) 
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \ + I915_DEBUG_WRITE) +#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ + I915_DEBUG_WRITE)) + #define I915_VERBOSE 0 #define BEGIN_LP_RING(n) do { \ From e35a41de3926ec81e3ed2ed31d1f30cecb3513f9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:13:59 +0100 Subject: [PATCH 037/476] drm/i915: allow lazy emitting of requests Sometimes (like when flushing in preparation of batchbuffer execution) we know that we'll emit a request but haven't yet done so. Allow this case by simply taking the next seqno by default. Ensure that a request is eventually emitted before waiting for an request by issuing it in i915_wait_request iff this is not yet done. Also replace one open-coded version of i915_gem_object_wait_rendering, to prevent future code-diversion. Chris Wilson asked me to explain and clarify what this patch does and why. Here it goes: Old way of moving objects onto the active list and associating them with a reques: 1. i915_add_request + store the returned seqno somewhere 2. i915_gem_object_move_to_active (with the stored seqno as parameter) For the current users, this is all fine. But I'd like to associate objects (and fence regs) with the batchbuffer request deep down in the execbuf call-chain. I thought about three ways of implementing this. a) Don't care, just emit request when we need a new seqno. When heavily pipelining fence reg changes, this would have caused tons of superflous request (and corresponding irqs). b) Thread all changed fences, objects, whatever through the execbuf-maze, so that when we emit a request, we can store the new seqno at all the right places. c) Kill that seqno-threading-around business by simply storing the next seqno, i.e. allow 2. to be done before 1. in the above sequence. I've decided to implement c) (in this patch). The following patches are just fall-out that resulted from this small conceptual change. * We can handle the flushing list processing where we actually emit a flush (i915_gem_flush and i915_retire_commands) instead of in i915_add_request. The code makes IMHO more sense this way (and i915_add_request looses the flush_domains parameter, obviously). * We can avoid emitting unnecessary requests. IMHO there's no point in emitting more than one request per batchbuffer (with or without an corresponding irq). * By enforcing 2. before 1. ordering in the above sequence the seqno argument of i915_gem_object_move_to_active is redundant and can be dropped. v2: Now i915_wait_request issues request if it is not yet emitted. Also introduce i915_gem_next_request_seqno(dev) just in case we ever need to do some prep work before using a new seqno. Signed-off-by: Daniel Vetter [ickle: Keep i915_gem_object_set_to_display_plane() uninterruptible.] 
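
The ordering change described in option (c) is easier to see in a compact form. What follows is a minimal, self-contained C sketch of the idea only; the demo_* names and structures are invented for illustration and are not the driver's API (the real code threads this through drm_i915_private, the ring structures and i915_add_request()).

/*
 * Toy model of option (c): an object may be tagged with the *next* seqno
 * before the request that hands out that seqno has been emitted.  A waiter
 * that asks for the not-yet-emitted seqno emits the request on demand.
 * All names here (demo_*) are illustrative stand-ins, not i915 code.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t next_seqno;   /* seqno the next emitted request will use */
	uint32_t last_emitted; /* seqno of the most recently emitted request */
};

/* Step 2 may now run before step 1: just borrow the next seqno. */
static uint32_t demo_move_to_active(struct demo_ring *ring)
{
	return ring->next_seqno;
}

/* Step 1: emit the request that actually consumes the borrowed seqno. */
static uint32_t demo_add_request(struct demo_ring *ring)
{
	ring->last_emitted = ring->next_seqno++;
	return ring->last_emitted;
}

/* A waiter first makes sure the lazily tagged request really exists. */
static void demo_wait_request(struct demo_ring *ring, uint32_t seqno)
{
	if (seqno == ring->next_seqno)
		seqno = demo_add_request(ring);

	printf("waiting for seqno %u (last emitted %u)\n",
	       (unsigned)seqno, (unsigned)ring->last_emitted);
}

int main(void)
{
	struct demo_ring ring = { .next_seqno = 1 };
	uint32_t tagged = demo_move_to_active(&ring); /* tagged with seqno 1 */

	demo_wait_request(&ring, tagged); /* emits request 1, then waits */
	return 0;
}

The point of the sketch is the check in demo_wait_request(): because an object can now carry a seqno that no emitted request owns yet, the waiter becomes responsible for emitting that request itself, which is what the new next_seqno check added to i915_do_wait_request() in this patch does.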
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 55 ++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 26eb6e31c743..1fed3e65a09c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,7 +46,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); @@ -1468,6 +1469,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) obj_priv->pages = NULL; } +static uint32_t +i915_gem_next_request_seqno(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + return dev_priv->next_seqno; +} + static void i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, struct intel_ring_buffer *ring) @@ -1483,6 +1492,11 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, drm_gem_object_reference(obj); obj_priv->active = 1; } + + /* Take the seqno of the next request if none is given */ + if (seqno == 0) + seqno = i915_gem_next_request_seqno(dev); + /* Move from whatever list we were on to the tail of execution. */ spin_lock(&dev_priv->mm.active_list_lock); list_move_tail(&obj_priv->list, &ring->active_list); @@ -1828,6 +1842,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, BUG_ON(seqno == 0); + if (seqno == dev_priv->next_seqno) { + seqno = i915_add_request(dev, NULL, 0, ring); + if (seqno == 0) + return -ENOMEM; + } + if (atomic_read(&dev_priv->mm.wedged)) return -EIO; @@ -1915,7 +1935,8 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. */ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj) +i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -1934,8 +1955,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) DRM_INFO("%s: object %p wait for seqno %08x\n", __func__, obj, obj_priv->last_rendering_seqno); #endif - ret = i915_wait_request(dev, - obj_priv->last_rendering_seqno, obj_priv->ring); + ret = i915_do_wait_request(dev, + obj_priv->last_rendering_seqno, + interruptible, + obj_priv->ring); if (ret != 0) return ret; } @@ -2438,7 +2461,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (ret != 0) return ret; - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret != 0) return ret; } @@ -2694,7 +2717,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) return ret; /* Wait on any GPU rendering and flushing to occur. 
*/ - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret != 0) return ret; @@ -2733,7 +2756,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; @@ -2747,18 +2769,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) return ret; /* Wait on any GPU rendering and flushing to occur. */ - if (obj_priv->active) { -#if WATCH_BUF - DRM_INFO("%s: object %p wait for seqno %08x\n", - __func__, obj, obj_priv->last_rendering_seqno); -#endif - ret = i915_do_wait_request(dev, - obj_priv->last_rendering_seqno, - 0, - obj_priv->ring); - if (ret != 0) - return ret; - } + ret = i915_gem_object_wait_rendering(obj, false); + if (ret != 0) + return ret; i915_gem_object_flush_cpu_write_domain(obj); @@ -2797,7 +2810,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) return ret; /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret != 0) return ret; @@ -3098,7 +3111,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, return ret; /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret != 0) return ret; i915_gem_object_flush_gtt_write_domain(obj); From 8bff917c93e365a8a145f9b1be99c81257038151 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:19:40 +0100 Subject: [PATCH 038/476] drm/i915: move flushing list processing to i915_gem_flush Now that we can move objects to the active list without already having emitted a request, move the flushing list handling into i915_gem_flush. This makes more sense and allows to drop a few i915_add_request calls that are not strictly necessary. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1fed3e65a09c..eea8232928bc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1637,7 +1637,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, } /* Associate any objects on the flushing list matching the write - * domain we're flushing with our flush. + * domain we're flushing with our request. */ if (flush_domains != 0) i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); @@ -1887,8 +1887,9 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, ret = -EIO; if (ret && ret != -ERESTARTSYS) - DRM_ERROR("%s returns %d (awaiting %d at %d)\n", - __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); + DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", + __func__, ret, seqno, ring->get_gem_seqno(dev, ring), + dev_priv->next_seqno); /* Directly dispatch request retiring. 
While we have the work queue * to handle this, the waiter on a request often wants an associated @@ -1918,8 +1919,10 @@ i915_gem_flush(struct drm_device *dev, uint32_t flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; + if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); + dev_priv->render_ring.flush(dev, &dev_priv->render_ring, invalidate_domains, flush_domains); @@ -1928,6 +1931,17 @@ i915_gem_flush(struct drm_device *dev, dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, invalidate_domains, flush_domains); + + /* Associate any objects on the flushing list matching the write + * domain we're flushing with the next request. + */ + if (flush_domains != 0) { + i915_gem_process_flushing_list(dev, flush_domains, 0, + &dev_priv->render_ring); + if (HAS_BSD(dev)) + i915_gem_process_flushing_list(dev, flush_domains, 0, + &dev_priv->bsd_ring); + } } /** @@ -2061,14 +2075,14 @@ i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. */ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, + seqno1 = i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (seqno1 == 0) return -ENOMEM; ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); if (HAS_BSD(dev)) { - seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, + seqno2 = i915_add_request(dev, NULL, 0, &dev_priv->bsd_ring); if (seqno2 == 0) return -ENOMEM; @@ -2078,7 +2092,6 @@ i915_gpu_idle(struct drm_device *dev) return ret; } - return ret; } @@ -3771,12 +3784,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); if (dev_priv->flush_rings & FLUSH_RENDER_RING) - (void)i915_add_request(dev, file_priv, - dev->flush_domains, + (void)i915_add_request(dev, file_priv, 0, &dev_priv->render_ring); if (dev_priv->flush_rings & FLUSH_BSD_RING) - (void)i915_add_request(dev, file_priv, - dev->flush_domains, + (void)i915_add_request(dev, file_priv, 0, &dev_priv->bsd_ring); } From a6910434e1b5f2a9fe7cab39b01bae9a7a7bbe70 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 2 Feb 2010 17:08:37 +0100 Subject: [PATCH 039/476] drm/i915: only one interrupt per batchbuffer is not enough! Previously I thought that one interrupt per batchbuffer should be enough. Now tedious benchmarking showed this to be wrong. Therefore track whether any commands have been isssued with a future seqno (like pipelined fencing changes or flushes). If this is the case emit a request before issueing the batchbuffer. 
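
The per-ring bookkeeping this introduces is small; here is a self-contained C sketch of just that flag, with toy_* names invented for illustration. Only the shape of the bookkeeping matches the driver: the real flag is outstanding_lazy_request in struct intel_ring_buffer, set when the next seqno is handed out and cleared once a real request has been added.

/*
 * Minimal model of the per-ring "outstanding lazy request" flag described
 * above.  The toy_* names are illustrative stand-ins, not i915 code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	const char *name;
	uint32_t next_seqno;
	bool outstanding_lazy_request; /* work tagged with a future seqno? */
};

/* Pipelined work (a flush, a fence update, ...) borrows the next seqno. */
static uint32_t toy_next_request_seqno(struct toy_ring *ring)
{
	ring->outstanding_lazy_request = true;
	return ring->next_seqno;
}

static uint32_t toy_add_request(struct toy_ring *ring)
{
	ring->outstanding_lazy_request = false;
	printf("%s: emitting request %u\n", ring->name,
	       (unsigned)ring->next_seqno);
	return ring->next_seqno++;
}

/* At batchbuffer dispatch, turn any borrowed seqno into a real request. */
static void toy_flush_lazy_request(struct toy_ring *ring)
{
	if (ring->outstanding_lazy_request)
		toy_add_request(ring);
}

int main(void)
{
	struct toy_ring render = { "render", 1, false };
	struct toy_ring bsd    = { "bsd",    1, false };

	(void)toy_next_request_seqno(&render); /* e.g. a pipelined flush */
	toy_flush_lazy_request(&render);       /* emits request 1 on render */
	toy_flush_lazy_request(&bsd);          /* nothing pending: no request */
	return 0;
}

With only this flag the execbuffer path can emit at most one request per ring per batchbuffer, which is why the hunks below can drop the flush_rings / FLUSH_RENDER_RING / FLUSH_BSD_RING tracking.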
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 --- drivers/gpu/drm/i915/i915_gem.c | 33 ++++++++++++------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 ++++ 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2692410bd605..fb8d68125f6b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -293,9 +293,6 @@ typedef struct drm_i915_private { unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; int num_pipe; - u32 flush_rings; -#define FLUSH_RENDER_RING 0x1 -#define FLUSH_BSD_RING 0x2 /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eea8232928bc..b52f47af41f4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1470,10 +1470,13 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) } static uint32_t -i915_gem_next_request_seqno(struct drm_device *dev) +i915_gem_next_request_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; + ring->outstanding_lazy_request = true; + return dev_priv->next_seqno; } @@ -1495,7 +1498,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, /* Take the seqno of the next request if none is given */ if (seqno == 0) - seqno = i915_gem_next_request_seqno(dev); + seqno = i915_gem_next_request_seqno(dev, ring); /* Move from whatever list we were on to the tail of execution. */ spin_lock(&dev_priv->mm.active_list_lock); @@ -2979,7 +2982,6 @@ static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; @@ -3042,13 +3044,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) obj->pending_write_domain = obj->write_domain; obj->read_domains = obj->pending_read_domains; - if (flush_domains & I915_GEM_GPU_DOMAINS) { - if (obj_priv->ring == &dev_priv->render_ring) - dev_priv->flush_rings |= FLUSH_RENDER_RING; - else if (obj_priv->ring == &dev_priv->bsd_ring) - dev_priv->flush_rings |= FLUSH_BSD_RING; - } - dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; #if WATCH_BUF @@ -3762,7 +3757,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ dev->invalidate_domains = 0; dev->flush_domains = 0; - dev_priv->flush_rings = 0; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -3783,12 +3777,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains); - if (dev_priv->flush_rings & FLUSH_RENDER_RING) - (void)i915_add_request(dev, file_priv, 0, - &dev_priv->render_ring); - if (dev_priv->flush_rings & FLUSH_BSD_RING) - (void)i915_add_request(dev, file_priv, 0, - &dev_priv->bsd_ring); + } + + if (dev_priv->render_ring.outstanding_lazy_request) { + (void)i915_add_request(dev, file_priv, 0, + &dev_priv->render_ring); + dev_priv->render_ring.outstanding_lazy_request = false; + } + if (dev_priv->bsd_ring.outstanding_lazy_request) { + (void)i915_add_request(dev, file_priv, 0, + &dev_priv->bsd_ring); + dev_priv->bsd_ring.outstanding_lazy_request = false; } for (i = 0; i < args->buffer_count; i++) { 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 525e7d3edda8..d3e5f40a8040 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -83,6 +83,11 @@ struct intel_ring_buffer { */ struct list_head request_list; + /** + * Do we have some not yet emitted requests outstanding? + */ + bool outstanding_lazy_request; + wait_queue_head_t irq_queue; drm_local_map_t map; }; From 8a1a49f954734040dbc7b87e3b1221a050045e43 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:29:04 +0100 Subject: [PATCH 040/476] drm/i915: move flushing list processing to i915_retire_commands ... instead of threading flush_domains through the execbuf code to i915_add_request. With this change 2 small cleanups are possible (likewise the majority of the patch): - The flush_domains parameter of i915_add_request is always 0. Drop it and the corresponding logic. - Ditto for the seqno param of i915_gem_process_flushing_list. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 9 ++-- drivers/gpu/drm/i915/i915_gem.c | 64 +++++++++---------------- drivers/gpu/drm/i915/intel_overlay.c | 17 +++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++ 4 files changed, 41 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fb8d68125f6b..f983130a702d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -991,12 +991,15 @@ int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains, struct intel_ring_buffer *ring); int i915_do_wait_request(struct drm_device *dev, - uint32_t seqno, int interruptible, - struct intel_ring_buffer *ring); + uint32_t seqno, + bool interruptible, + struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +void i915_gem_process_flushing_list(struct drm_device *dev, + uint32_t flush_domains, + struct intel_ring_buffer *ring); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b52f47af41f4..46394f735a81 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1570,9 +1570,9 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) i915_verify_inactive(dev, __FILE__, __LINE__); } -static void +void i915_gem_process_flushing_list(struct drm_device *dev, - uint32_t flush_domains, uint32_t seqno, + uint32_t flush_domains, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -1590,7 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, obj->write_domain = 0; list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, seqno, ring); + i915_gem_object_move_to_active(obj, 0, ring); /* update the fence lru list */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { @@ -1608,8 +1608,9 @@ i915_gem_process_flushing_list(struct drm_device *dev, } uint32_t -i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains, struct intel_ring_buffer *ring) +i915_add_request(struct drm_device *dev, + struct drm_file *file_priv, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; 
struct drm_i915_file_private *i915_file_priv = NULL; @@ -1624,7 +1625,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, if (request == NULL) return 0; - seqno = ring->add_request(dev, ring, file_priv, flush_domains); + seqno = ring->add_request(dev, ring, file_priv, 0); request->seqno = seqno; request->ring = ring; @@ -1639,12 +1640,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, INIT_LIST_HEAD(&request->client_list); } - /* Associate any objects on the flushing list matching the write - * domain we're flushing with our request. - */ - if (flush_domains != 0) - i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); - if (!dev_priv->mm.suspended) { mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); if (was_empty) @@ -1659,7 +1654,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, * Ensures that all commands in the ring are finished * before signalling the CPU */ -static uint32_t +static void i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) { uint32_t flush_domains = 0; @@ -1670,7 +1665,6 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) ring->flush(dev, ring, I915_GEM_DOMAIN_COMMAND, flush_domains); - return flush_domains; } /** @@ -1837,7 +1831,7 @@ i915_gem_retire_work_handler(struct work_struct *work) int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, - int interruptible, struct intel_ring_buffer *ring) + bool interruptible, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; u32 ier; @@ -1846,7 +1840,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, BUG_ON(seqno == 0); if (seqno == dev_priv->next_seqno) { - seqno = i915_add_request(dev, NULL, 0, ring); + seqno = i915_add_request(dev, NULL, ring); if (seqno == 0) return -ENOMEM; } @@ -1934,17 +1928,6 @@ i915_gem_flush(struct drm_device *dev, dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, invalidate_domains, flush_domains); - - /* Associate any objects on the flushing list matching the write - * domain we're flushing with the next request. - */ - if (flush_domains != 0) { - i915_gem_process_flushing_list(dev, flush_domains, 0, - &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_process_flushing_list(dev, flush_domains, 0, - &dev_priv->bsd_ring); - } } /** @@ -2078,24 +2061,23 @@ i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. */ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno1 = i915_add_request(dev, NULL, 0, - &dev_priv->render_ring); + seqno1 = i915_add_request(dev, NULL, &dev_priv->render_ring); if (seqno1 == 0) return -ENOMEM; ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); + if (ret) + return ret; if (HAS_BSD(dev)) { - seqno2 = i915_add_request(dev, NULL, 0, - &dev_priv->bsd_ring); + seqno2 = i915_add_request(dev, NULL, &dev_priv->bsd_ring); if (seqno2 == 0) return -ENOMEM; - ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); if (ret) return ret; } - return ret; + return 0; } int @@ -2641,7 +2623,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. 
*/ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) + if (i915_add_request(dev, NULL, obj_priv->ring) == 0) return -ENOMEM; trace_i915_gem_object_change_domain(obj, @@ -3564,7 +3546,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_relocation_entry *relocs = NULL; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; - uint32_t seqno, flush_domains, reloc_index; + uint32_t seqno, reloc_index; int pin_tries, flips; struct intel_ring_buffer *ring = NULL; @@ -3780,13 +3762,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } if (dev_priv->render_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, 0, - &dev_priv->render_ring); + (void)i915_add_request(dev, file_priv, &dev_priv->render_ring); dev_priv->render_ring.outstanding_lazy_request = false; } if (dev_priv->bsd_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, 0, - &dev_priv->bsd_ring); + (void)i915_add_request(dev, file_priv, &dev_priv->bsd_ring); dev_priv->bsd_ring.outstanding_lazy_request = false; } @@ -3835,7 +3815,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * Ensure that the commands in the batch buffer are * finished before the interrupt fires */ - flush_domains = i915_retire_commands(dev, ring); + i915_retire_commands(dev, ring); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3846,7 +3826,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * *some* interrupts representing completion of buffers that we can * wait on when trying to clear up gtt space). */ - seqno = i915_add_request(dev, file_priv, flush_domains, ring); + seqno = i915_add_request(dev, file_priv, ring); BUG_ON(seqno == 0); for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -4244,7 +4224,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, */ if (obj->write_domain) { i915_gem_flush(dev, 0, obj->write_domain); - (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); + (void)i915_add_request(dev, file_priv, obj_priv->ring); } /* Update the active list for the hardware's current position. 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 1d306a458be6..a203b5c7ff71 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -230,7 +230,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; @@ -269,7 +269,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay, ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); } static int intel_overlay_wait_flip(struct intel_overlay *overlay) @@ -301,7 +301,7 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; @@ -342,7 +342,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; @@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; @@ -409,7 +409,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, if (overlay->last_flip_req == 0) { overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; } @@ -439,8 +439,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, - 0, &dev_priv->render_ring); + overlay->last_flip_req = + i915_add_request(dev, NULL, + &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cb3508f78bc3..1ae2b25bf7e4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -116,6 +116,8 @@ render_ring_flush(struct drm_device *dev, intel_ring_emit(dev, ring, MI_NOOP); intel_ring_advance(dev, ring); } + + i915_gem_process_flushing_list(dev, flush_domains, ring); } static unsigned int render_ring_get_head(struct drm_device *dev, @@ -384,6 +386,8 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_emit(dev, ring, MI_FLUSH); intel_ring_emit(dev, ring, MI_NOOP); intel_ring_advance(dev, ring); + + i915_gem_process_flushing_list(dev, flush_domains, ring); } static inline unsigned int bsd_ring_get_head(struct drm_device *dev, From 86394c669a485cb773ddef9f2b66bebdb23241d0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 2 Feb 2010 22:54:18 +0100 Subject: [PATCH 041/476] drm/i915: kill a no longer necessary BUG_ON i915_gem_object_move_to_active can handle zero seqno for us now. And not emitting a request is not fatal here - we'll try to emit a new one if we have to wait for some rendering to complete. 
In case this assumption ever gets accidentally broken, there's already a BUG_ON to catch it in i915_do_wait_request. So just silently ignore ENOMEM here instead of screwing up the whole drm. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 46394f735a81..a54b8132f891 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3827,7 +3827,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * wait on when trying to clear up gtt space). */ seqno = i915_add_request(dev, file_priv, ring); - BUG_ON(seqno == 0); for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; obj_priv = to_intel_bo(obj); From 617dbe2787568316eed976f533f93c31fdbc75b9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:16:02 +0100 Subject: [PATCH 042/476] drm/i915: drop seqno argument from i915_gem_object_move_to_active By moving one i915_add_request we can solely depend on the new auto-seqno-numbering behaviour. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a54b8132f891..4c043cb02b38 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1481,12 +1481,14 @@ i915_gem_next_request_seqno(struct drm_device *dev, } static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, +i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t seqno = i915_gem_next_request_seqno(dev, ring); + BUG_ON(ring == NULL); obj_priv->ring = ring; @@ -1496,10 +1498,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, obj_priv->active = 1; } - /* Take the seqno of the next request if none is given */ - if (seqno == 0) - seqno = i915_gem_next_request_seqno(dev, ring); - /* Move from whatever list we were on to the tail of execution. */ spin_lock(&dev_priv->mm.active_list_lock); list_move_tail(&obj_priv->list, &ring->active_list); @@ -1590,7 +1588,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, obj->write_domain = 0; list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, 0, ring); + i915_gem_object_move_to_active(obj, ring); /* update the fence lru list */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { @@ -3819,6 +3817,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_verify_inactive(dev, __FILE__, __LINE__); + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; + obj_priv = to_intel_bo(obj); + + i915_gem_object_move_to_active(obj, ring); +#if WATCH_LRU + DRM_INFO("%s: move to exec list %p\n", __func__, obj); +#endif + } + /* * Get a seqno representing the execution of the current buffer, * which we can wait on. We would like to mitigate these interrupts, @@ -3827,15 +3835,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * wait on when trying to clear up gtt space). 
*/ seqno = i915_add_request(dev, file_priv, ring); - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - obj_priv = to_intel_bo(obj); - i915_gem_object_move_to_active(obj, seqno, ring); -#if WATCH_LRU - DRM_INFO("%s: move to exec list %p\n", __func__, obj); -#endif - } #if WATCH_LRU i915_dump_lru(dev, __func__); #endif From ba3d8d749b01548b918519a49024341d8dc9e71c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:37:04 +0100 Subject: [PATCH 043/476] drm/i915: move the wait_rendering call into flush_gpu_write_domain One caller (for the pageflip support) wants a purely pipelined flush. Distinguish this case by a new parameter. This will also be useful later on for pipelined fencing. v2: Simplify the code by depending upon the implicit request emitting of i915_wait_request. Signed-off-by: Daniel Vetter [ickle: And drop the non-interruptible support in the process.] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 88 +++++++++++++-------------------- 1 file changed, 33 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4c043cb02b38..9f13aaa7820e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -37,7 +37,9 @@ #include static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); + +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, + bool pipelined); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, @@ -46,8 +48,7 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, - bool interruptible); +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); @@ -1933,8 +1934,7 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. 
*/ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj, - bool interruptible) +i915_gem_object_wait_rendering(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -1953,10 +1953,9 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, DRM_INFO("%s: object %p wait for seqno %08x\n", __func__, obj, obj_priv->last_rendering_seqno); #endif - ret = i915_do_wait_request(dev, - obj_priv->last_rendering_seqno, - interruptible, - obj_priv->ring); + ret = i915_wait_request(dev, + obj_priv->last_rendering_seqno, + obj_priv->ring); if (ret != 0) return ret; } @@ -2453,11 +2452,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (!IS_I965G(dev)) { int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret != 0) - return ret; - - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; } @@ -2609,11 +2604,11 @@ i915_gem_clflush_object(struct drm_gem_object *obj) /** Flushes any GPU write domain for the object if it's dirty. */ static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, + bool pipelined) { struct drm_device *dev = obj->dev; uint32_t old_write_domain; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; @@ -2621,13 +2616,15 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - if (i915_add_request(dev, NULL, obj_priv->ring) == 0) - return -ENOMEM; trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); - return 0; + + if (pipelined) + return 0; + + return i915_gem_object_wait_rendering(obj); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2684,7 +2681,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) i915_gem_object_flush_cpu_write_domain(obj); break; default: - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj, true); break; } @@ -2708,12 +2705,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret != 0) - return ret; - - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; @@ -2723,8 +2715,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) /* If we're writing through the GTT domain, then CPU and GPU caches * will need to be invalidated at next use. */ - if (write) + if (write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + obj->read_domains &= I915_GEM_DOMAIN_GTT; + } i915_gem_object_flush_cpu_write_domain(obj); @@ -2753,38 +2750,25 @@ int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t old_write_domain, old_read_domains; + uint32_t old_read_domains; int ret; /* Not valid to be called on unbound objects. 
*/ if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj, true); if (ret != 0) return ret; i915_gem_object_flush_cpu_write_domain(obj); - old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj_priv->dirty = 1; trace_i915_gem_object_change_domain(obj, old_read_domains, - old_write_domain); + obj->write_domain); return 0; } @@ -2801,12 +2785,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) uint32_t old_write_domain, old_read_domains; int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; @@ -2836,6 +2815,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * need to be invalidated at next use. */ if (write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + obj->read_domains &= I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; } @@ -3094,12 +3077,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (offset == 0 && size == obj->size) return i915_gem_object_set_to_cpu_domain(obj, 0); - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; i915_gem_object_flush_gtt_write_domain(obj); From 4fc6ee764620eb3a9364e736d03605d4b233ea61 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 11 Feb 2010 22:53:20 +0100 Subject: [PATCH 044/476] drm/i915: drop i915_add_request right in front of i915_wait_request ... take advantage of the new implicit request issuing of i915_wait_request. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9f13aaa7820e..ce66f90716c7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2043,7 +2043,6 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - uint32_t seqno1, seqno2; int ret; spin_lock(&dev_priv->mm.active_list_lock); @@ -2058,18 +2057,17 @@ i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. 
*/ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno1 = i915_add_request(dev, NULL, &dev_priv->render_ring); - if (seqno1 == 0) - return -ENOMEM; - ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); + + ret = i915_wait_request(dev, + i915_gem_next_request_seqno(dev, &dev_priv->render_ring), + &dev_priv->render_ring); if (ret) return ret; if (HAS_BSD(dev)) { - seqno2 = i915_add_request(dev, NULL, &dev_priv->bsd_ring); - if (seqno2 == 0) - return -ENOMEM; - ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); + ret = i915_wait_request(dev, + i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring), + &dev_priv->bsd_ring); if (ret) return ret; } From 722506f04dae7c88193dab2fc836ff15070190f0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 09:28:50 +0100 Subject: [PATCH 045/476] drm/i915/overlay: Whitespace Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_overlay.c | 474 ++++++++++++++------------- 2 files changed, 244 insertions(+), 232 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 64a7c87817d7..72f72f52931d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -291,7 +291,7 @@ extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); extern int intel_overlay_switch_off(struct intel_overlay *overlay); extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - int interruptible); + bool interruptible); extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a203b5c7ff71..5ed1783a69a0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -176,7 +176,6 @@ struct overlay_registers { #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) #define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) - static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; @@ -235,7 +234,8 @@ static int intel_overlay_on(struct intel_overlay *overlay) return -ENOMEM; ret = i915_do_wait_request(dev, - overlay->last_flip_req, 1, &dev_priv->render_ring); + overlay->last_flip_req, true, + &dev_priv->render_ring); if (ret != 0) return ret; @@ -246,7 +246,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) /* overlay needs to be enabled in OCMD reg */ static void intel_overlay_continue(struct intel_overlay *overlay, - bool load_polyphase_filter) + bool load_polyphase_filter) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -275,13 +275,14 @@ static void intel_overlay_continue(struct intel_overlay *overlay, static int intel_overlay_wait_flip(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; int ret; u32 tmp; if (overlay->last_flip_req != 0) { - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); if (ret == 0) { overlay->last_flip_req = 0; @@ -296,17 +297,18 @@ 
static int intel_overlay_wait_flip(struct intel_overlay *overlay) overlay->hw_wedged = RELEASE_OLD_VID; BEGIN_LP_RING(2); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); overlay->last_flip_req = i915_add_request(dev, NULL, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); if (ret != 0) return ret; @@ -337,28 +339,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - - /* turn overlay off */ - overlay->hw_wedged = SWITCH_OFF_STAGE_2; - - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); ADVANCE_LP_RING(); overlay->last_flip_req = @@ -366,8 +348,30 @@ static int intel_overlay_off(struct intel_overlay *overlay) if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); + if (ret != 0) + return ret; + + /* turn overlay off */ + overlay->hw_wedged = SWITCH_OFF_STAGE_2; + + BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + overlay->last_flip_req = + i915_add_request(dev, NULL, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); if (ret != 0) return ret; @@ -396,7 +400,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* recover from an interruption due to a signal * We have to be careful not to repeat work forever an make forward progess. 
*/ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - int interruptible) + bool interruptible) { struct drm_device *dev = overlay->dev; struct drm_gem_object *obj; @@ -415,46 +419,47 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, } ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); + interruptible, &dev_priv->render_ring); if (ret != 0) return ret; switch (overlay->hw_wedged) { - case RELEASE_OLD_VID: - obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; - break; - case SWITCH_OFF_STAGE_1: - flip_addr = overlay->flip_addr; - flip_addr |= OFC_UPDATE; + case RELEASE_OLD_VID: + obj = &overlay->old_vid_bo->base; + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->old_vid_bo = NULL; + break; + case SWITCH_OFF_STAGE_1: + flip_addr = overlay->flip_addr; + flip_addr |= OFC_UPDATE; - overlay->hw_wedged = SWITCH_OFF_STAGE_2; + overlay->hw_wedged = SWITCH_OFF_STAGE_2; - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, - &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; + overlay->last_flip_req = + i915_add_request(dev, NULL, + &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); - if (ret != 0) - return ret; + ret = i915_do_wait_request(dev, overlay->last_flip_req, + interruptible, + &dev_priv->render_ring); + if (ret != 0) + return ret; - case SWITCH_OFF_STAGE_2: - intel_overlay_off_tail(overlay); - break; - default: - BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); + case SWITCH_OFF_STAGE_2: + intel_overlay_off_tail(overlay); + break; + default: + BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); } overlay->hw_wedged = 0; @@ -507,50 +512,50 @@ struct put_image_params { static int packed_depth_bytes(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - return 4; - case I915_OVERLAY_YUV411: - /* return 6; not implemented */ - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + return 4; + case I915_OVERLAY_YUV411: + /* return 6; not implemented */ + default: + return -EINVAL; } } static int packed_width_bytes(u32 format, short width) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - return width << 1; - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + return width << 1; + default: + return -EINVAL; } } static int uv_hsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - case I915_OVERLAY_YUV420: - return 2; - case I915_OVERLAY_YUV411: - case I915_OVERLAY_YUV410: - return 4; - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV420: + return 2; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + return 4; + default: + return -EINVAL; } } static int uv_vsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV420: - case I915_OVERLAY_YUV410: - return 2; - case I915_OVERLAY_YUV422: - case 
I915_OVERLAY_YUV411: - return 1; - default: - return -EINVAL; + case I915_OVERLAY_YUV420: + case I915_OVERLAY_YUV410: + return 2; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV411: + return 1; + default: + return -EINVAL; } } @@ -588,7 +593,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, - 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; + 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 +}; + static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, @@ -598,7 +605,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, - 0x3000, 0x0800, 0x3000}; + 0x3000, 0x0800, 0x3000 +}; static void update_polyphase_filter(struct overlay_registers *regs) { @@ -631,29 +639,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay, yscale = 1 << FP_SHIFT; /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ - xscale_UV = xscale/uv_hscale; - yscale_UV = yscale/uv_vscale; - /* make the Y scale to UV scale ratio an exact multiply */ - xscale = xscale_UV * uv_hscale; - yscale = yscale_UV * uv_vscale; + xscale_UV = xscale/uv_hscale; + yscale_UV = yscale/uv_vscale; + /* make the Y scale to UV scale ratio an exact multiply */ + xscale = xscale_UV * uv_hscale; + yscale = yscale_UV * uv_vscale; /*} else { - xscale_UV = 0; - yscale_UV = 0; - }*/ + xscale_UV = 0; + yscale_UV = 0; + }*/ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) scale_changed = true; overlay->old_xscale = xscale; overlay->old_yscale = yscale; - regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) - | ((xscale >> FP_SHIFT) << 16) - | ((xscale & FRACT_MASK) << 3); - regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) - | ((xscale_UV >> FP_SHIFT) << 16) - | ((xscale_UV & FRACT_MASK) << 3); - regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) - | ((yscale_UV >> FP_SHIFT) << 0); + regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | + ((xscale >> FP_SHIFT) << 16) | + ((xscale & FRACT_MASK) << 3)); + + regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | + ((xscale_UV >> FP_SHIFT) << 16) | + ((xscale_UV & FRACT_MASK) << 3)); + + regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | + ((yscale_UV >> FP_SHIFT) << 0))); if (scale_changed) update_polyphase_filter(regs); @@ -666,21 +676,21 @@ static void update_colorkey(struct intel_overlay *overlay, { u32 key = overlay->color_key; switch (overlay->crtc->base.fb->bits_per_pixel) { - case 8: - regs->DCLRKV = 0; - regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; - case 16: - if (overlay->crtc->base.fb->depth == 15) { - regs->DCLRKV = RGB15_TO_COLORKEY(key); - regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; - } else { - regs->DCLRKV = RGB16_TO_COLORKEY(key); - regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; - } - case 24: - case 32: - regs->DCLRKV = key; - regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; + case 8: + regs->DCLRKV = 0; + regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; + case 16: + if (overlay->crtc->base.fb->depth == 15) { + regs->DCLRKV = RGB15_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; + } else { + regs->DCLRKV = RGB16_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; + } + case 24: + case 32: + regs->DCLRKV = key; + regs->DCLRKM = CLK_RGB24_MASK | 
DST_KEY_ENABLE; } } @@ -690,39 +700,39 @@ static u32 overlay_cmd_reg(struct put_image_params *params) if (params->format & I915_OVERLAY_YUV_PLANAR) { switch (params->format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - cmd |= OCMD_YUV_422_PLANAR; - break; - case I915_OVERLAY_YUV420: - cmd |= OCMD_YUV_420_PLANAR; - break; - case I915_OVERLAY_YUV411: - case I915_OVERLAY_YUV410: - cmd |= OCMD_YUV_410_PLANAR; - break; + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PLANAR; + break; + case I915_OVERLAY_YUV420: + cmd |= OCMD_YUV_420_PLANAR; + break; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + cmd |= OCMD_YUV_410_PLANAR; + break; } } else { /* YUV packed */ switch (params->format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - cmd |= OCMD_YUV_422_PACKED; - break; - case I915_OVERLAY_YUV411: - cmd |= OCMD_YUV_411_PACKED; - break; + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PACKED; + break; + case I915_OVERLAY_YUV411: + cmd |= OCMD_YUV_411_PACKED; + break; } switch (params->format & I915_OVERLAY_SWAP_MASK) { - case I915_OVERLAY_NO_SWAP: - break; - case I915_OVERLAY_UV_SWAP: - cmd |= OCMD_UV_SWAP; - break; - case I915_OVERLAY_Y_SWAP: - cmd |= OCMD_Y_SWAP; - break; - case I915_OVERLAY_Y_AND_UV_SWAP: - cmd |= OCMD_Y_AND_UV_SWAP; - break; + case I915_OVERLAY_NO_SWAP: + break; + case I915_OVERLAY_UV_SWAP: + cmd |= OCMD_UV_SWAP; + break; + case I915_OVERLAY_Y_SWAP: + cmd |= OCMD_Y_SWAP; + break; + case I915_OVERLAY_Y_AND_UV_SWAP: + cmd |= OCMD_Y_AND_UV_SWAP; + break; } } @@ -789,7 +799,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->SWIDTH = params->src_w; regs->SWIDTHSW = calc_swidthsw(overlay->dev, - params->offset_Y, tmp_width); + params->offset_Y, tmp_width); regs->SHEIGHT = params->src_h; regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; regs->OSTRIDE = params->stride_Y; @@ -800,9 +810,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, u32 tmp_U, tmp_V; regs->SWIDTH |= (params->src_w/uv_hscale) << 16; tmp_U = calc_swidthsw(overlay->dev, params->offset_U, - params->src_w/uv_hscale); + params->src_w/uv_hscale); tmp_V = calc_swidthsw(overlay->dev, params->offset_V, - params->src_w/uv_hscale); + params->src_w/uv_hscale); regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; @@ -868,7 +878,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, struct intel_crtc *crtc) { - drm_i915_private_t *dev_priv = overlay->dev->dev_private; + drm_i915_private_t *dev_priv = overlay->dev->dev_private; u32 pipeconf; int pipeconf_reg = (crtc->pipe == 0) ? 
PIPEACONF : PIPEBCONF; @@ -887,7 +897,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; u32 ratio; u32 pfit_control = I915_READ(PFIT_CONTROL); @@ -911,12 +921,10 @@ static int check_overlay_dst(struct intel_overlay *overlay, { struct drm_display_mode *mode = &overlay->crtc->base.mode; - if ((rec->dst_x < mode->crtc_hdisplay) - && (rec->dst_x + rec->dst_width - <= mode->crtc_hdisplay) - && (rec->dst_y < mode->crtc_vdisplay) - && (rec->dst_y + rec->dst_height - <= mode->crtc_vdisplay)) + if (rec->dst_x < mode->crtc_hdisplay && + rec->dst_x + rec->dst_width <= mode->crtc_hdisplay && + rec->dst_y < mode->crtc_vdisplay && + rec->dst_y + rec->dst_height <= mode->crtc_vdisplay) return 0; else return -EINVAL; @@ -949,45 +957,45 @@ static int check_overlay_src(struct drm_device *dev, /* check src dimensions */ if (IS_845G(dev) || IS_I830(dev)) { - if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY - || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) + if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || + rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; } else { - if (rec->src_height > IMAGE_MAX_HEIGHT - || rec->src_width > IMAGE_MAX_WIDTH) + if (rec->src_height > IMAGE_MAX_HEIGHT || + rec->src_width > IMAGE_MAX_WIDTH) return -EINVAL; } /* better safe than sorry, use 4 as the maximal subsampling ratio */ - if (rec->src_height < N_VERT_Y_TAPS*4 - || rec->src_width < N_HORIZ_Y_TAPS*4) + if (rec->src_height < N_VERT_Y_TAPS*4 || + rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { - case I915_OVERLAY_RGB: - /* not implemented */ + case I915_OVERLAY_RGB: + /* not implemented */ + return -EINVAL; + case I915_OVERLAY_YUV_PACKED: + depth = packed_depth_bytes(rec->flags); + if (uv_vscale != 1) return -EINVAL; - case I915_OVERLAY_YUV_PACKED: - depth = packed_depth_bytes(rec->flags); - if (uv_vscale != 1) - return -EINVAL; - if (depth < 0) - return depth; - /* ignore UV planes */ - rec->stride_UV = 0; - rec->offset_U = 0; - rec->offset_V = 0; - /* check pixel alignment */ - if (rec->offset_Y % depth) - return -EINVAL; - break; - case I915_OVERLAY_YUV_PLANAR: - if (uv_vscale < 0 || uv_hscale < 0) - return -EINVAL; - /* no offset restrictions for planar formats */ - break; - default: + if (depth < 0) + return depth; + /* ignore UV planes */ + rec->stride_UV = 0; + rec->offset_U = 0; + rec->offset_V = 0; + /* check pixel alignment */ + if (rec->offset_Y % depth) return -EINVAL; + break; + case I915_OVERLAY_YUV_PLANAR: + if (uv_vscale < 0 || uv_hscale < 0) + return -EINVAL; + /* no offset restrictions for planar formats */ + break; + default: + return -EINVAL; } if (rec->src_width % uv_hscale) @@ -1011,32 +1019,32 @@ static int check_overlay_src(struct drm_device *dev, /* check buffer dimensions */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { - case I915_OVERLAY_RGB: - case I915_OVERLAY_YUV_PACKED: - /* always 4 Y values per depth pixels */ - if (packed_width_bytes(rec->flags, rec->src_width) - > rec->stride_Y) - return -EINVAL; + case I915_OVERLAY_RGB: + case I915_OVERLAY_YUV_PACKED: + /* always 4 Y values per depth pixels */ + if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) + return -EINVAL; - tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) - return 
-EINVAL; - break; - case I915_OVERLAY_YUV_PLANAR: - if (rec->src_width > rec->stride_Y) - return -EINVAL; - if (rec->src_width/uv_hscale > rec->stride_UV) - return -EINVAL; + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + break; - tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) - return -EINVAL; - tmp = rec->stride_UV*rec->src_height; - tmp /= uv_vscale; - if (rec->offset_U + tmp > new_bo->size - || rec->offset_V + tmp > new_bo->size) - return -EINVAL; - break; + case I915_OVERLAY_YUV_PLANAR: + if (rec->src_width > rec->stride_Y) + return -EINVAL; + if (rec->src_width/uv_hscale > rec->stride_UV) + return -EINVAL; + + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + tmp = rec->stride_UV*rec->src_height; + tmp /= uv_vscale; + if (rec->offset_U + tmp > new_bo->size || + rec->offset_V + tmp > new_bo->size) + return -EINVAL; + break; } return 0; @@ -1082,7 +1090,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, return -ENOMEM; drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, - DRM_MODE_OBJECT_CRTC); + DRM_MODE_OBJECT_CRTC); if (!drmmode_obj) { ret = -ENOENT; goto out_free; @@ -1090,7 +1098,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); new_bo = drm_gem_object_lookup(dev, file_priv, - put_image_rec->bo_handle); + put_image_rec->bo_handle); if (!new_bo) { ret = -ENOENT; goto out_free; @@ -1133,10 +1141,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, if (overlay->pfit_active) { params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / - overlay->pfit_vscale_ratio); + overlay->pfit_vscale_ratio); /* shifting right rounds downwards, so add 1 */ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / - overlay->pfit_vscale_ratio) + 1; + overlay->pfit_vscale_ratio) + 1; } else { params->dst_y = put_image_rec->dst_y; params->dst_h = put_image_rec->dst_height; @@ -1148,8 +1156,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, params->src_h = put_image_rec->src_height; params->src_scan_w = put_image_rec->src_scan_width; params->src_scan_h = put_image_rec->src_scan_height; - if (params->src_scan_h > params->src_h - || params->src_scan_w > params->src_w) { + if (params->src_scan_h > params->src_h || + params->src_scan_w > params->src_w) { ret = -EINVAL; goto out_unlock; } @@ -1205,7 +1213,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2) return false; for (i = 0; i < 3; i++) { - if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) + if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) return false; } @@ -1226,16 +1234,18 @@ static bool check_gamma5_errata(u32 gamma5) static int check_gamma(struct drm_intel_overlay_attrs *attrs) { - if (!check_gamma_bounds(0, attrs->gamma0) - || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) - || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) - || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) - || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) - || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) - || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) + if (!check_gamma_bounds(0, attrs->gamma0) || + !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || + !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || + !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || + !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || + !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || 
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) return -EINVAL; + if (!check_gamma5_errata(attrs->gamma5)) return -EINVAL; + return 0; } @@ -1285,12 +1295,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, ret = -EINVAL; goto out_unlock; } + if (attrs->contrast <= 255) { overlay->contrast = attrs->contrast; } else { ret = -EINVAL; goto out_unlock; } + if (attrs->saturation <= 1023) { overlay->saturation = attrs->saturation; } else { @@ -1409,7 +1421,7 @@ out_free: void intel_cleanup_overlay(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; if (dev_priv->overlay) { /* The bo's should be free'd by the generic code already. From 6ba3ddd9838f5e4d6ac7c6dce95648d205e11bff Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 09:30:58 +0100 Subject: [PATCH 046/476] drm/i915/overlay: Missing breaks between case statements for color depth Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 5ed1783a69a0..4972b5ca1e8f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -675,10 +675,13 @@ static void update_colorkey(struct intel_overlay *overlay, struct overlay_registers *regs) { u32 key = overlay->color_key; + switch (overlay->crtc->base.fb->bits_per_pixel) { case 8: regs->DCLRKV = 0; regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; + break; + case 16: if (overlay->crtc->base.fb->depth == 15) { regs->DCLRKV = RGB15_TO_COLORKEY(key); @@ -687,10 +690,13 @@ static void update_colorkey(struct intel_overlay *overlay, regs->DCLRKV = RGB16_TO_COLORKEY(key); regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; } + break; + case 24: case 32: regs->DCLRKV = key; regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; + break; } } From 0ddc1289f3ffd779779ddd3922f26ae7d0a21604 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 09:35:00 +0100 Subject: [PATCH 047/476] drm/i915/overlay: Ensure that the reg_bo is in the GTT prior to writing. Just makes sure that writes are not being aliased by the CPU cache and do make it out to main memory. 
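For clarity, the non-physical setup path after this change looks roughly like the sketch below. It is abridged (error reporting and surrounding context trimmed); the real hunk follows in the diff, and all names are taken from the driver code it touches:

	ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
	if (ret)
		goto out_free_bo;
	overlay->flip_addr = overlay->reg_bo->gtt_offset;

	/* pull the bo into the GTT domain: this flushes any CPU cachelines
	 * dirtied while the object sat in the CPU domain, so later writes
	 * through the GTT mapping actually land in main memory */
	ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
	if (ret)
		goto out_unpin_bo;

The new out_unpin_bo label unpins the register bo on failure, mirroring the error unwinding already used for out_free_bo.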
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=24977 Cc: stable@kernel.org --- drivers/gpu/drm/i915/intel_overlay.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 4972b5ca1e8f..fef4dd61dcfd 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1386,6 +1386,12 @@ void intel_setup_overlay(struct drm_device *dev) goto out_free_bo; } overlay->flip_addr = overlay->reg_bo->gtt_offset; + + ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); + if (ret) { + DRM_ERROR("failed to move overlay register bo into the GTT\n"); + goto out_unpin_bo; + } } else { ret = i915_gem_attach_phys_object(dev, reg_bo, I915_GEM_PHYS_OVERLAY_REGS, @@ -1418,6 +1424,8 @@ void intel_setup_overlay(struct drm_device *dev) DRM_INFO("initialized overlay support\n"); return; +out_unpin_bo: + i915_gem_object_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(reg_bo); out_free: From 31578148b2c62612f9516fdcf5ebb64ab32ed12d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 09:42:51 +0100 Subject: [PATCH 048/476] drm/i915/overlay: Move capabilities bits to common info block. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 12 +++++++++ drivers/gpu/drm/i915/i915_drv.h | 5 ++++ drivers/gpu/drm/i915/intel_overlay.c | 40 +++++++++++++--------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2879a768d65c..dffc1bcf7b79 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -62,49 +62,60 @@ extern int intel_agp_enabled; static const struct intel_device_info intel_i830_info = { .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .is_i8xx = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .is_i8xx = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_i9xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .has_overlay = 1, 
}; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { @@ -121,6 +132,7 @@ static const struct intel_device_info intel_gm45_info = { static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f983130a702d..10c9e416c96b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -213,6 +213,8 @@ struct intel_device_info { u8 has_pipe_cxsr : 1; u8 has_hotplug : 1; u8 cursor_needs_physical : 1; + u8 has_overlay : 1; + u8 overlay_needs_physical : 1; }; enum no_fbc_reason { @@ -1218,6 +1220,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index fef4dd61dcfd..df5277aaa98b 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -173,9 +173,6 @@ struct overlay_registers { /* overlay flip addr flag */ #define OFC_UPDATE 0x1 -#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) -#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) - static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; @@ -184,7 +181,9 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over /* no recursive mappings */ BUG_ON(overlay->virt_addr); - if (OVERLAY_NONPHYSICAL(overlay->dev)) { + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) { + regs = overlay->reg_bo->phys_obj->handle->vaddr; + } else { regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, overlay->reg_bo->gtt_offset, KM_USER0); @@ -193,15 +192,14 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over DRM_ERROR("failed to map overlay regs in GTT\n"); return NULL; } - } else - regs = overlay->reg_bo->phys_obj->handle->vaddr; + } return overlay->virt_addr = regs; } static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) { - if (OVERLAY_NONPHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); overlay->virt_addr = NULL; @@ -1366,7 +1364,7 @@ void intel_setup_overlay(struct drm_device *dev) struct overlay_registers *regs; int ret; - if (!OVERLAY_EXISTS(dev)) + if (!HAS_OVERLAY(dev)) return; overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); @@ -1379,7 +1377,16 @@ void intel_setup_overlay(struct drm_device *dev) goto out_free; overlay->reg_bo = to_intel_bo(reg_bo); - if (OVERLAY_NONPHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev)) { + ret = i915_gem_attach_phys_object(dev, reg_bo, + I915_GEM_PHYS_OVERLAY_REGS, + 0); + if (ret) { + DRM_ERROR("failed to attach phys overlay 
regs\n"); + goto out_free_bo; + } + overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + } else { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); @@ -1392,15 +1399,6 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to move overlay register bo into the GTT\n"); goto out_unpin_bo; } - } else { - ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS, - 0); - if (ret) { - DRM_ERROR("failed to attach phys overlay regs\n"); - goto out_free_bo; - } - overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; } /* init all values */ @@ -1471,10 +1469,10 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); - if (OVERLAY_NONPHYSICAL(overlay->dev)) - error->base = (long) overlay->reg_bo->gtt_offset; - else + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; + else + error->base = (long) overlay->reg_bo->gtt_offset; regs = intel_overlay_map_regs_atomic(overlay); if (!regs) From 8d74f656dd78ae1ba813389cd46197c6329696bc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 10:35:26 +0100 Subject: [PATCH 049/476] drm/i915/overlay: Use non-atomic mappings for the common case. The only time where an atomic mapping is required is during error-capture and there we cannot use the default slot, but need to specifically use one of the IRQ slots. So separate out the two conditions and use the atomic mapping only when appropriate. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 72 +++++++++++++++++++++------- 1 file changed, 55 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index df5277aaa98b..0c13e1b88cbe 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -173,7 +173,9 @@ struct overlay_registers { /* overlay flip addr flag */ #define OFC_UPDATE 0x1 -static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +static struct overlay_registers * +intel_overlay_map_regs_atomic(struct intel_overlay *overlay, + int slot) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; struct overlay_registers *regs; @@ -186,7 +188,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over } else { regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, overlay->reg_bo->gtt_offset, - KM_USER0); + slot); if (!regs) { DRM_ERROR("failed to map overlay regs in GTT\n"); @@ -197,10 +199,45 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over return overlay->virt_addr = regs; } -static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) +static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, + int slot) { if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); + io_mapping_unmap_atomic(overlay->virt_addr, slot); + + overlay->virt_addr = NULL; + + return; +} + +static struct overlay_registers * +intel_overlay_map_regs(struct intel_overlay *overlay) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers *regs; + + /* no recursive mappings */ + BUG_ON(overlay->virt_addr); + + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) { + regs = overlay->reg_bo->phys_obj->handle->vaddr; + } else { + regs = 
io_mapping_map_wc(dev_priv->mm.gtt_mapping, + overlay->reg_bo->gtt_offset); + + if (!regs) { + DRM_ERROR("failed to map overlay regs in GTT\n"); + return NULL; + } + } + + return overlay->virt_addr = regs; +} + +static void intel_overlay_unmap_regs(struct intel_overlay *overlay) +{ + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap(overlay->virt_addr); overlay->virt_addr = NULL; @@ -467,7 +504,8 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, /* Wait for pending overlay flip and release old frame. * Needs to be called before the overlay register are changed - * via intel_overlay_(un)map_regs_atomic */ + * via intel_overlay_(un)map_regs + */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { int ret; @@ -770,7 +808,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; if (!overlay->active) { - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; @@ -780,14 +818,14 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->OCONFIG |= OCONF_CSC_MODE_BT709; regs->OCONFIG |= overlay->crtc->pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay); ret = intel_overlay_on(overlay); if (ret != 0) goto out_unpin; } - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; @@ -830,7 +868,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->OCMD = overlay_cmd_reg(params); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay); intel_overlay_continue(overlay, scale_changed); @@ -866,9 +904,9 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) if (ret != 0) return ret; - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); regs->OCMD = 0; - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay); ret = intel_overlay_off(overlay); if (ret != 0) @@ -1314,7 +1352,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, goto out_unlock; } - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unlock; @@ -1322,7 +1360,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (!IS_I9XX(dev)) { @@ -1407,7 +1445,7 @@ void intel_setup_overlay(struct drm_device *dev) overlay->contrast = 75; overlay->saturation = 146; - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) goto out_free_bo; @@ -1416,7 +1454,7 @@ void intel_setup_overlay(struct drm_device *dev) update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay); dev_priv->overlay = overlay; DRM_INFO("initialized overlay support\n"); @@ -1474,12 +1512,12 @@ intel_overlay_capture_error_state(struct drm_device *dev) else error->base = (long) overlay->reg_bo->gtt_offset; - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs_atomic(overlay, KM_IRQ0); if (!regs) goto err; memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0); return error; From 
60fc332cb5ab19e5a86d696b210df65814b2ad8a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 10:44:45 +0100 Subject: [PATCH 050/476] drm/i915/overlay: Tidy attribute checking. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 42 ++++++++++------------------ 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 0c13e1b88cbe..ab2a8cad8129 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1314,10 +1314,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); + ret = -EINVAL; if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { - attrs->color_key = overlay->color_key; + attrs->color_key = overlay->color_key; attrs->brightness = overlay->brightness; - attrs->contrast = overlay->contrast; + attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; if (IS_I9XX(dev)) { @@ -1328,29 +1329,18 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->gamma4 = I915_READ(OGAMC4); attrs->gamma5 = I915_READ(OGAMC5); } - ret = 0; } else { - overlay->color_key = attrs->color_key; - if (attrs->brightness >= -128 && attrs->brightness <= 127) { - overlay->brightness = attrs->brightness; - } else { - ret = -EINVAL; + if (attrs->brightness < -128 || attrs->brightness > 127) + goto out_unlock; + if (attrs->contrast > 255) + goto out_unlock; + if (attrs->saturation > 1023) goto out_unlock; - } - if (attrs->contrast <= 255) { - overlay->contrast = attrs->contrast; - } else { - ret = -EINVAL; - goto out_unlock; - } - - if (attrs->saturation <= 1023) { - overlay->saturation = attrs->saturation; - } else { - ret = -EINVAL; - goto out_unlock; - } + overlay->color_key = attrs->color_key; + overlay->brightness = attrs->brightness; + overlay->contrast = attrs->contrast; + overlay->saturation = attrs->saturation; regs = intel_overlay_map_regs(overlay); if (!regs) { @@ -1363,10 +1353,8 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (!IS_I9XX(dev)) { - ret = -EINVAL; + if (!IS_I9XX(dev)) goto out_unlock; - } if (overlay->active) { ret = -EBUSY; @@ -1374,7 +1362,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, } ret = check_gamma(attrs); - if (ret != 0) + if (ret) goto out_unlock; I915_WRITE(OGAMC0, attrs->gamma0); @@ -1384,9 +1372,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, I915_WRITE(OGAMC4, attrs->gamma4); I915_WRITE(OGAMC5, attrs->gamma5); } - ret = 0; } + ret = 0; out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); From a29301288f1840bdf9c5456da9cd7c944436edd5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 10:47:56 +0100 Subject: [PATCH 051/476] drm/i915/overlay: Use the recommended page alignment for physical regs Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index ab2a8cad8129..b0aea47cb445 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1406,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev) if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, 
I915_GEM_PHYS_OVERLAY_REGS, - 0); + PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; From 62cf4e6fef35b4422e206b63b7f0ac90261d4ad9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 10:50:36 +0100 Subject: [PATCH 052/476] drm/i915/overlay: Destroy reg_bo on shutdown. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index b0aea47cb445..3236cca0bb56 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1461,14 +1461,16 @@ void intel_cleanup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - if (dev_priv->overlay) { - /* The bo's should be free'd by the generic code already. - * Furthermore modesetting teardown happens beforehand so the - * hardware should be off already */ - BUG_ON(dev_priv->overlay->active); + if (!dev_priv->overlay) + return; - kfree(dev_priv->overlay); - } + /* The bo's should be free'd by the generic code already. + * Furthermore modesetting teardown happens beforehand so the + * hardware should be off already */ + BUG_ON(dev_priv->overlay->active); + + drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); + kfree(dev_priv->overlay); } struct intel_overlay_error_state { From 7340ea7dcf32227a77ed9df154fc2b2f00ad2fb1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 10:57:04 +0100 Subject: [PATCH 053/476] drm/i915/overlay: Remove duplicated definition of OFC_UPDATE Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3236cca0bb56..cc86b3e0849d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -170,9 +170,6 @@ struct overlay_registers { u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; }; -/* overlay flip addr flag */ -#define OFC_UPDATE 0x1 - static struct overlay_registers * intel_overlay_map_regs_atomic(struct intel_overlay *overlay, int slot) From 446d2183af68c0fd2772f5ef97a033efe69904a5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 11:15:58 +0100 Subject: [PATCH 054/476] drm/i915/overlay: Tidy update_pfit_vscale_ratio() Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index cc86b3e0849d..484058216e9d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -937,19 +937,20 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 ratio; u32 pfit_control = I915_READ(PFIT_CONTROL); + u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in - * line with the intel documentation for the i965 */ - if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { - ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; - } else { /* on i965 use the PGM reg to read out the autoscaler values */ - ratio = I915_READ(PFIT_PGM_RATIOS); - if (IS_I965G(dev)) - ratio >>= PFIT_VERT_SCALE_SHIFT_965; 
+ * line with the intel documentation for the i965 + */ + if (!IS_I965G(dev)) { + if (pfit_control & VERT_AUTO_SCALE) + ratio = I915_READ(PFIT_AUTO_RATIOS); else - ratio >>= PFIT_VERT_SCALE_SHIFT; + ratio = I915_READ(PFIT_PGM_RATIOS); + ratio >>= PFIT_VERT_SCALE_SHIFT; + } else { /* on i965 use the PGM reg to read out the autoscaler values */ + ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } overlay->pfit_vscale_ratio = ratio; From 9f7c3f442bb7c0d0a0ed25cc287932450a1f2bab Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 11:29:34 +0100 Subject: [PATCH 055/476] drm/i915/overlay: Tidy check_overlay_dst() Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 484058216e9d..7fbc0f3096fd 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -989,25 +989,24 @@ static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, struct drm_gem_object *new_bo) { - u32 stride_mask; - int depth; int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); - size_t tmp; + u32 stride_mask, depth, tmp; /* check src dimensions */ if (IS_845G(dev) || IS_I830(dev)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || - rec->src_width > IMAGE_MAX_WIDTH_LEGACY) + rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; } else { if (rec->src_height > IMAGE_MAX_HEIGHT || - rec->src_width > IMAGE_MAX_WIDTH) + rec->src_width > IMAGE_MAX_WIDTH) return -EINVAL; } + /* better safe than sorry, use 4 as the maximal subsampling ratio */ if (rec->src_height < N_VERT_Y_TAPS*4 || - rec->src_width < N_HORIZ_Y_TAPS*4) + rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; /* check alignment constraints */ @@ -1015,12 +1014,15 @@ static int check_overlay_src(struct drm_device *dev, case I915_OVERLAY_RGB: /* not implemented */ return -EINVAL; + case I915_OVERLAY_YUV_PACKED: - depth = packed_depth_bytes(rec->flags); if (uv_vscale != 1) return -EINVAL; + + depth = packed_depth_bytes(rec->flags); if (depth < 0) return depth; + /* ignore UV planes */ rec->stride_UV = 0; rec->offset_U = 0; @@ -1029,11 +1031,13 @@ static int check_overlay_src(struct drm_device *dev, if (rec->offset_Y % depth) return -EINVAL; break; + case I915_OVERLAY_YUV_PLANAR: if (uv_vscale < 0 || uv_hscale < 0) return -EINVAL; /* no offset restrictions for planar formats */ break; + default: return -EINVAL; } @@ -1053,8 +1057,8 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 
- 4 : 8; - if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) + 4096 : 8192; + if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) return -EINVAL; /* check buffer dimensions */ @@ -1076,11 +1080,11 @@ static int check_overlay_src(struct drm_device *dev, if (rec->src_width/uv_hscale > rec->stride_UV) return -EINVAL; - tmp = rec->stride_Y*rec->src_height; + tmp = rec->stride_Y * rec->src_height; if (rec->offset_Y + tmp > new_bo->size) return -EINVAL; - tmp = rec->stride_UV*rec->src_height; - tmp /= uv_vscale; + + tmp = rec->stride_UV * (rec->src_height / uv_vscale); if (rec->offset_U + tmp > new_bo->size || rec->offset_V + tmp > new_bo->size) return -EINVAL; From b6c028e00445de9dfde2cd0c26521ac53965320a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 11:55:08 +0100 Subject: [PATCH 056/476] drm/i915/overlay: Refactor do_wait_request() Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 119 +++++++++------------------ 1 file changed, 38 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 7fbc0f3096fd..7055c4613b60 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -241,17 +241,39 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay) return; } +static int intel_overlay_do_wait_request(struct intel_overlay *overlay, + bool interruptible, + int stage) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + overlay->last_flip_req = + i915_add_request(dev, NULL, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + + overlay->hw_wedged = stage; + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); + if (ret) + return ret; + + overlay->hw_wedged = 0; + overlay->last_flip_req = 0; + return 0; +} + /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - int ret; - drm_i915_private_t *dev_priv = dev->dev_private; BUG_ON(overlay->active); overlay->active = 1; - overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); @@ -260,20 +282,8 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; - return 0; + return intel_overlay_do_wait_request(overlay, true, + NEEDS_WAIT_FOR_FLIP); } /* overlay needs to be enabled in OCMD reg */ @@ -326,27 +336,12 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) } /* synchronous slowpath */ - overlay->hw_wedged = RELEASE_OLD_VID; - BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; - return 0; + return intel_overlay_do_wait_request(overlay, true, RELEASE_OLD_VID); } /* 
overlay needs to be disabled in OCMD reg */ @@ -354,7 +349,6 @@ static int intel_overlay_off(struct intel_overlay *overlay) { u32 flip_addr = overlay->flip_addr; struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; int ret; BUG_ON(!overlay->active); @@ -366,8 +360,6 @@ static int intel_overlay_off(struct intel_overlay *overlay) flip_addr |= OFC_UPDATE; /* wait for overlay to go idle */ - overlay->hw_wedged = SWITCH_OFF_STAGE_1; - BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); @@ -375,20 +367,12 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - &dev_priv->render_ring); - if (ret != 0) + ret = intel_overlay_do_wait_request(overlay, true, + SWITCH_OFF_STAGE_1); + if (ret) return ret; /* turn overlay off */ - overlay->hw_wedged = SWITCH_OFF_STAGE_2; - BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); OUT_RING(flip_addr); @@ -396,20 +380,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; - return ret; + return intel_overlay_do_wait_request(overlay, true, + SWITCH_OFF_STAGE_2); } static void intel_overlay_off_tail(struct intel_overlay *overlay) @@ -443,16 +415,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, if (overlay->hw_wedged == HW_WEDGED) return -EIO; - if (overlay->last_flip_req == 0) { - overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - } - ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible, &dev_priv->render_ring); - if (ret != 0) + if (ret) return ret; switch (overlay->hw_wedged) { @@ -462,12 +427,11 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, drm_gem_object_unreference(obj); overlay->old_vid_bo = NULL; break; + case SWITCH_OFF_STAGE_1: flip_addr = overlay->flip_addr; flip_addr |= OFC_UPDATE; - overlay->hw_wedged = SWITCH_OFF_STAGE_2; - BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); OUT_RING(flip_addr); @@ -475,16 +439,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, - &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, - &dev_priv->render_ring); - if (ret != 0) + ret = intel_overlay_do_wait_request(overlay, interruptible, + SWITCH_OFF_STAGE_2); + if (ret) return ret; case SWITCH_OFF_STAGE_2: From 9bb2ff731b32c023e7a502efdc0dee46157290d5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 12:02:11 +0100 Subject: [PATCH 057/476] drm/i915/overlay: Explicitly pass regs from map to unmap The scoping of the validity of the mapping is thus clarified. 
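With the pointer threaded through explicitly, every caller follows the same bracketed pattern; the switch-off path in the diff below reduces to roughly this (abridged, names as in the driver):

	regs = intel_overlay_map_regs(overlay);
	regs->OCMD = 0;		/* regs is only valid until the matching unmap */
	intel_overlay_unmap_regs(overlay, regs);

The mapping's lifetime is now visible at the call site instead of being hidden in overlay->virt_addr.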
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_overlay.c | 61 ++++++++-------------------- 2 files changed, 18 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 72f72f52931d..f757fbd7a8de 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -161,7 +161,6 @@ struct intel_overlay { /* register access */ u32 flip_addr; struct drm_i915_gem_object *reg_bo; - void *virt_addr; /* flip handling */ uint32_t last_flip_req; int hw_wedged; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 7055c4613b60..88c2d1f8d05c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -177,34 +177,22 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay, drm_i915_private_t *dev_priv = overlay->dev->dev_private; struct overlay_registers *regs; - /* no recursive mappings */ - BUG_ON(overlay->virt_addr); - - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) { + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) regs = overlay->reg_bo->phys_obj->handle->vaddr; - } else { + else regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, overlay->reg_bo->gtt_offset, slot); - if (!regs) { - DRM_ERROR("failed to map overlay regs in GTT\n"); - return NULL; - } - } - - return overlay->virt_addr = regs; + return regs; } static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, - int slot) + int slot, + struct overlay_registers *regs) { if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(overlay->virt_addr, slot); - - overlay->virt_addr = NULL; - - return; + io_mapping_unmap_atomic(regs, slot); } static struct overlay_registers * @@ -213,32 +201,20 @@ intel_overlay_map_regs(struct intel_overlay *overlay) drm_i915_private_t *dev_priv = overlay->dev->dev_private; struct overlay_registers *regs; - /* no recursive mappings */ - BUG_ON(overlay->virt_addr); - - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) { + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) regs = overlay->reg_bo->phys_obj->handle->vaddr; - } else { + else regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, overlay->reg_bo->gtt_offset); - if (!regs) { - DRM_ERROR("failed to map overlay regs in GTT\n"); - return NULL; - } - } - - return overlay->virt_addr = regs; + return regs; } -static void intel_overlay_unmap_regs(struct intel_overlay *overlay) +static void intel_overlay_unmap_regs(struct intel_overlay *overlay, + struct overlay_registers *regs) { if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - io_mapping_unmap(overlay->virt_addr); - - overlay->virt_addr = NULL; - - return; + io_mapping_unmap(regs); } static int intel_overlay_do_wait_request(struct intel_overlay *overlay, @@ -772,7 +748,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->OCONFIG |= OCONF_CSC_MODE_BT709; regs->OCONFIG |= overlay->crtc->pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; - intel_overlay_unmap_regs(overlay); + intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_on(overlay); if (ret != 0) @@ -822,7 +798,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->OCMD = overlay_cmd_reg(params); - intel_overlay_unmap_regs(overlay); + intel_overlay_unmap_regs(overlay, regs); intel_overlay_continue(overlay, scale_changed); @@ -860,7 +836,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) regs = intel_overlay_map_regs(overlay); regs->OCMD = 0; - intel_overlay_unmap_regs(overlay); + intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_off(overlay); if (ret != 0) @@ -1309,7 +1285,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs(overlay); + intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (!IS_I9XX(dev)) @@ -1398,10 +1374,9 @@ void intel_setup_overlay(struct drm_device *dev) memset(regs, 0, sizeof(struct overlay_registers)); update_polyphase_filter(regs); - update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs(overlay); + intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; DRM_INFO("initialized overlay support\n"); @@ -1466,7 +1441,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) goto err; memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); - intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0); + intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0, regs); return error; From 8dfbc3403113bcc51f0350c3471fa1abf664305f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 12:07:32 +0100 Subject: [PATCH 058/476] drm/i915/overlay: Combine SWITCH_OFF into a single step We can program the h/w to first wait on the flip and then switch off without relying on s/w intervention. This removes the need for a double step switch off, bringing much rejoicing. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 3 +-- drivers/gpu/drm/i915/intel_overlay.c | 38 ++++------------------------ 2 files changed, 6 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f757fbd7a8de..01ca494c6d87 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -167,8 +167,7 @@ struct intel_overlay { #define HW_WEDGED 1 #define NEEDS_WAIT_FOR_FLIP 2 #define RELEASE_OLD_VID 3 -#define SWITCH_OFF_STAGE_1 4 -#define SWITCH_OFF_STAGE_2 5 +#define SWITCH_OFF 4 }; struct intel_crtc { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 88c2d1f8d05c..d0f901e4665f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -325,7 +325,6 @@ static int intel_overlay_off(struct intel_overlay *overlay) { u32 flip_addr = overlay->flip_addr; struct drm_device *dev = overlay->dev; - int ret; BUG_ON(!overlay->active); @@ -335,29 +334,18 @@ static int intel_overlay_off(struct intel_overlay *overlay) * of the hw. 
Do it in both cases */ flip_addr |= OFC_UPDATE; + BEGIN_LP_RING(6); /* wait for overlay to go idle */ - BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - ret = intel_overlay_do_wait_request(overlay, true, - SWITCH_OFF_STAGE_1); - if (ret) - return ret; - /* turn overlay off */ - BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, true, - SWITCH_OFF_STAGE_2); + return intel_overlay_do_wait_request(overlay, true, SWITCH_OFF); } static void intel_overlay_off_tail(struct intel_overlay *overlay) @@ -383,9 +371,8 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, bool interruptible) { struct drm_device *dev = overlay->dev; - struct drm_gem_object *obj; drm_i915_private_t *dev_priv = dev->dev_private; - u32 flip_addr; + struct drm_gem_object *obj; int ret; if (overlay->hw_wedged == HW_WEDGED) @@ -404,25 +391,10 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, overlay->old_vid_bo = NULL; break; - case SWITCH_OFF_STAGE_1: - flip_addr = overlay->flip_addr; - flip_addr |= OFC_UPDATE; - - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - ret = intel_overlay_do_wait_request(overlay, interruptible, - SWITCH_OFF_STAGE_2); - if (ret) - return ret; - - case SWITCH_OFF_STAGE_2: + case SWITCH_OFF: intel_overlay_off_tail(overlay); break; + default: BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); } From 5cd68c9864d65e49c910c701716e4e94e09f7ce0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 12:21:54 +0100 Subject: [PATCH 059/476] drm/i915/overlay: Tidy release_old_vid() Inline the call to wait_flip() and simplify the resulting code. 
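The simplified helper then reads as follows; this is an abridged view of the hunk below (local declarations omitted), shown here only to make the resulting control flow easy to follow:

	if (!overlay->old_vid_bo)
		return 0;	/* nothing to release */

	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
		/* synchronous slowpath: make the ring wait for the flip,
		 * then block on the request */
		BEGIN_LP_RING(2);
		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();

		ret = intel_overlay_do_wait_request(overlay, true,
						    RELEASE_OLD_VID);
		if (ret)
			return ret;
	}

	intel_overlay_release_old_vid_tail(overlay);	/* unpin + unref old_vid_bo */
	return 0;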
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 73 +++++++++++----------------- 1 file changed, 28 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d0f901e4665f..8c660abb0e66 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -290,36 +290,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, i915_add_request(dev, NULL, &dev_priv->render_ring); } -static int intel_overlay_wait_flip(struct intel_overlay *overlay) -{ - struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; - u32 tmp; - - if (overlay->last_flip_req != 0) { - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - &dev_priv->render_ring); - if (ret == 0) { - overlay->last_flip_req = 0; - - tmp = I915_READ(ISR); - - if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) - return 0; - } - } - - /* synchronous slowpath */ - BEGIN_LP_RING(2); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - return intel_overlay_do_wait_request(overlay, true, RELEASE_OLD_VID); -} - /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { @@ -348,6 +318,16 @@ static int intel_overlay_off(struct intel_overlay *overlay) return intel_overlay_do_wait_request(overlay, true, SWITCH_OFF); } +static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) +{ + struct drm_gem_object *obj = &overlay->old_vid_bo->base; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + + overlay->old_vid_bo = NULL; +} + static void intel_overlay_off_tail(struct intel_overlay *overlay) { struct drm_gem_object *obj; @@ -372,7 +352,6 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; int ret; if (overlay->hw_wedged == HW_WEDGED) @@ -385,10 +364,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, switch (overlay->hw_wedged) { case RELEASE_OLD_VID: - obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; + intel_overlay_release_old_vid_tail(overlay); break; case SWITCH_OFF: @@ -410,23 +386,30 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; int ret; - struct drm_gem_object *obj; - /* only wait if there is actually an old frame to release to - * guarantee forward progress */ + /* Only wait if there is actually an old frame to release to + * guarantee forward progress. 
+ */ if (!overlay->old_vid_bo) return 0; - ret = intel_overlay_wait_flip(overlay); - if (ret != 0) - return ret; + if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { + /* synchronous slowpath */ + BEGIN_LP_RING(2); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); - obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; + ret = intel_overlay_do_wait_request(overlay, true, + RELEASE_OLD_VID); + if (ret) + return ret; + } + intel_overlay_release_old_vid_tail(overlay); return 0; } From 8dc5d14741dc1ee0074a14b360993a10c2c02d24 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 12:36:12 +0100 Subject: [PATCH 060/476] drm/i915: Preallocate requests By allocating the request prior to writing to the ringbuffer, we can abort the operation without leaving the GPU in an inconsistent state. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 ++-- drivers/gpu/drm/i915/i915_gem.c | 32 +++++++++++++------- drivers/gpu/drm/i915/intel_overlay.c | 45 +++++++++++++++++++++------- 3 files changed, 59 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 10c9e416c96b..101607391c4f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -992,8 +992,9 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); uint32_t i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - struct intel_ring_buffer *ring); + struct drm_file *file_priv, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring); int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, bool interruptible, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ce66f90716c7..afe4a9b0a03d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1609,20 +1609,22 @@ i915_gem_process_flushing_list(struct drm_device *dev, uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + struct drm_i915_gem_request *request, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_file_private *i915_file_priv = NULL; - struct drm_i915_gem_request *request; uint32_t seqno; int was_empty; if (file_priv != NULL) i915_file_priv = file_priv->driver_priv; - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return 0; + if (request == NULL) { + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return 0; + } seqno = ring->add_request(dev, ring, file_priv, 0); @@ -1839,7 +1841,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, BUG_ON(seqno == 0); if (seqno == dev_priv->next_seqno) { - seqno = i915_add_request(dev, NULL, ring); + seqno = i915_add_request(dev, NULL, NULL, ring); if (seqno == 0) return -ENOMEM; } @@ -3505,8 +3507,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, return ret; } - -int +static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv, struct drm_i915_gem_execbuffer2 *args, @@ -3518,6 +3519,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_relocation_entry *relocs = NULL; + struct 
drm_i915_gem_request *request = NULL; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, reloc_index; @@ -3571,6 +3573,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } } + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) { + ret = -ENOMEM; + goto pre_mutex_err; + } + ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, &relocs); if (ret != 0) @@ -3736,11 +3744,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } if (dev_priv->render_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, &dev_priv->render_ring); + (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring); dev_priv->render_ring.outstanding_lazy_request = false; } if (dev_priv->bsd_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, &dev_priv->bsd_ring); + (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring); dev_priv->bsd_ring.outstanding_lazy_request = false; } @@ -3810,7 +3818,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * *some* interrupts representing completion of buffers that we can * wait on when trying to clear up gtt space). */ - seqno = i915_add_request(dev, file_priv, ring); + seqno = i915_add_request(dev, file_priv, request, ring); + request = NULL; #if WATCH_LRU i915_dump_lru(dev, __func__); @@ -3849,6 +3858,7 @@ pre_mutex_err: drm_free_large(object_list); kfree(cliprects); + kfree(request); return ret; } @@ -4199,7 +4209,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, */ if (obj->write_domain) { i915_gem_flush(dev, 0, obj->write_domain); - (void)i915_add_request(dev, file_priv, obj_priv->ring); + (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring); } /* Update the active list for the hardware's current position. 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 8c660abb0e66..cd0c4bf88b94 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -218,6 +218,7 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, } static int intel_overlay_do_wait_request(struct intel_overlay *overlay, + struct drm_i915_gem_request *request, bool interruptible, int stage) { @@ -226,7 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); + i915_add_request(dev, NULL, request, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; @@ -246,11 +247,15 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; + struct drm_i915_gem_request *request; BUG_ON(overlay->active); - overlay->active = 1; + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); OUT_RING(overlay->flip_addr | OFC_UPDATE); @@ -258,21 +263,26 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, true, + return intel_overlay_do_wait_request(overlay, request, true, NEEDS_WAIT_FOR_FLIP); } /* overlay needs to be enabled in OCMD reg */ -static void intel_overlay_continue(struct intel_overlay *overlay, - bool load_polyphase_filter) +static int intel_overlay_continue(struct intel_overlay *overlay, + bool load_polyphase_filter) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; BUG_ON(!overlay->active); + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + if (load_polyphase_filter) flip_addr |= OFC_UPDATE; @@ -287,17 +297,23 @@ static void intel_overlay_continue(struct intel_overlay *overlay, ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, &dev_priv->render_ring); + i915_add_request(dev, NULL, request, &dev_priv->render_ring); + return 0; } /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { - u32 flip_addr = overlay->flip_addr; struct drm_device *dev = overlay->dev; + u32 flip_addr = overlay->flip_addr; + struct drm_i915_gem_request *request; BUG_ON(!overlay->active); + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + /* According to intel docs the overlay hw may hang (when switching * off) without loading the filter coeffs. 
It is however unclear whether * this applies to the disabling of the overlay or to the switching off @@ -315,7 +331,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, true, SWITCH_OFF); + return intel_overlay_do_wait_request(overlay, request, true, + SWITCH_OFF); } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) @@ -397,13 +414,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return 0; if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { + struct drm_i915_gem_request *request; + /* synchronous slowpath */ + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - ret = intel_overlay_do_wait_request(overlay, true, + ret = intel_overlay_do_wait_request(overlay, request, true, RELEASE_OLD_VID); if (ret) return ret; @@ -755,7 +778,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, intel_overlay_unmap_regs(overlay, regs); - intel_overlay_continue(overlay, scale_changed); + ret = intel_overlay_continue(overlay, scale_changed); + if (ret) + goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = to_intel_bo(new_bo); From 5fe82c5ee1ba2d04183c376038c5d233a0311ec9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 12:38:21 +0100 Subject: [PATCH 061/476] drm/i915/overlay: Make do_put_image() as static Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index cd0c4bf88b94..5ca7ef01f959 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -689,9 +689,9 @@ static u32 overlay_cmd_reg(struct put_image_params *params) return cmd; } -int intel_overlay_do_put_image(struct intel_overlay *overlay, - struct drm_gem_object *new_bo, - struct put_image_params *params) +static int intel_overlay_do_put_image(struct intel_overlay *overlay, + struct drm_gem_object *new_bo, + struct put_image_params *params) { int ret, tmp_width; struct overlay_registers *regs; From 106dadacbeeea92f61a2c32f3651ee31c1b34e31 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 16 Jul 2010 17:13:01 +0100 Subject: [PATCH 062/476] drm/i915/overlay: Workaround i830 overlay activation bug. On i830, there exists a bug where an overlay on pipe B requires the mode clock on pipe A in order to activate. So workaround this by activating pipe A when trying to enable the overlay on pipe B. 
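The workaround follows a common quirk-bracket shape: turn the dependency on only if it was off, remember that we did, and undo it once the real operation has been issued. A minimal standalone sketch of that shape (hypothetical names, not the i915 functions):

#include <stdbool.h>
#include <stdio.h>

static bool pipe_a_enabled;

static int activate_pipe_a(void)
{
	if (pipe_a_enabled)
		return 0;            /* already on: nothing to undo later */
	pipe_a_enabled = true;
	printf("pipe A forced on for the quirk\n");
	return 1;                /* tell the caller we changed state */
}

static void deactivate_pipe_a(void)
{
	pipe_a_enabled = false;
	printf("pipe A released\n");
}

static int enable_overlay(bool is_i830)
{
	int pipe_a_quirk = 0;

	if (is_i830)
		pipe_a_quirk = activate_pipe_a();

	printf("overlay on\n");   /* the real work goes here */

	if (pipe_a_quirk)
		deactivate_pipe_a();  /* only undo what we turned on */
	return 0;
}

int main(void)
{
	return enable_overlay(true);
}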
References: [Bug 29007] GPU hang on video playback with overlay https://bugs.freedesktop.org/show_bug.cgi?id=29007 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_overlay.c | 71 ++++++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 5ca7ef01f959..389690d36b59 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -243,18 +243,76 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, return 0; } +/* Workaround for i830 bug where pipe a must be enable to change control regs */ +static int +i830_activate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_display_mode vesa_640x480 = { + DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 489, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) + }, *mode; + + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); + if (crtc->dpms_mode == DRM_MODE_DPMS_ON) + return 0; + + /* most i8xx have pipe a forced on, so don't trust dpms mode */ + if (I915_READ(PIPEACONF) & PIPEACONF_ENABLE) + return 0; + + crtc_funcs = crtc->base.helper_private; + if (crtc_funcs->dpms == NULL) + return 0; + + DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); + + mode = drm_mode_duplicate(dev, &vesa_640x480); + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + if(!drm_crtc_helper_set_mode(&crtc->base, mode, + crtc->base.x, crtc->base.y, + crtc->base.fb)) + return 0; + + crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); + return 1; +} + +static void +i830_deactivate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); +} + /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_gem_request *request; + int pipe_a_quirk = 0; + int ret; BUG_ON(overlay->active); overlay->active = 1; + if (IS_I830(dev)) { + pipe_a_quirk = i830_activate_pipe_a(dev); + if (pipe_a_quirk < 0) + return pipe_a_quirk; + } + request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; + if (request == NULL) { + ret = -ENOMEM; + goto out; + } BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); @@ -263,8 +321,13 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, request, true, - NEEDS_WAIT_FOR_FLIP); + ret = intel_overlay_do_wait_request(overlay, request, true, + NEEDS_WAIT_FOR_FLIP); +out: + if (pipe_a_quirk) + i830_deactivate_pipe_a(dev); + + return ret; } /* overlay needs to be enabled in OCMD reg */ From 5dcdbcb06badbdf2faa698bf3198e421a1e12840 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 13:50:28 +0100 Subject: [PATCH 063/476] drm/i915/overlay: Pass interruptible to switch_off() During DPMS we currently do not want the overlay code to be interruptible, so pass that information down and only take the uninterrruptible paths. 
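The change amounts to threading one boolean down the call chain so that each wait can choose between an interruptible and an uninterruptible sleep. A standalone sketch of the plumbing (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

static int wait_for_flip(bool interruptible)
{
	if (interruptible)
		printf("waiting, may return early on a signal\n");
	else
		printf("waiting uninterruptibly\n");
	return 0;
}

static int overlay_switch_off(bool interruptible)
{
	/* the flag is simply forwarded to every wait on the way down */
	return wait_for_flip(interruptible);
}

int main(void)
{
	overlay_switch_off(true);    /* ioctl path: user may interrupt */
	overlay_switch_off(false);   /* DPMS path: must not be interrupted */
	return 0;
}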
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 28 ++++++---------------------- drivers/gpu/drm/i915/intel_drv.h | 5 ++--- drivers/gpu/drm/i915/intel_overlay.c | 23 +++++++++++++---------- 3 files changed, 21 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 120a9c0c2da6..e4fb5364a533 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2223,33 +2223,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { - struct intel_overlay *overlay; - int ret; - if (!enable && intel_crtc->overlay) { - overlay = intel_crtc->overlay; - mutex_lock(&overlay->dev->struct_mutex); - for (;;) { - ret = intel_overlay_switch_off(overlay); - if (ret == 0) - break; + struct intel_overlay *overlay = intel_crtc->overlay; - ret = intel_overlay_recover_from_interrupt(overlay, 0); - if (ret != 0) { - /* overlay doesn't react anymore. Usually - * results in a black screen and an unkillable - * X server. */ - BUG(); - overlay->hw_wedged = HW_WEDGED; - break; - } - } + mutex_lock(&overlay->dev->struct_mutex); + (void) intel_overlay_switch_off(overlay, false); mutex_unlock(&overlay->dev->struct_mutex); } - /* Let userspace switch the overlay on again. In most cases userspace - * has to recompute where to put it anyway. */ - return; + /* Let userspace switch the overlay on again. In most cases userspace + * has to recompute where to put it anyway. + */ } static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 01ca494c6d87..44744537a0cf 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -287,9 +287,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); -extern int intel_overlay_switch_off(struct intel_overlay *overlay); -extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - bool interruptible); +extern int intel_overlay_switch_off(struct intel_overlay *overlay, + bool interruptible); extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 389690d36b59..3533355c5ea3 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -365,7 +365,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay, } /* overlay needs to be disabled in OCMD reg */ -static int intel_overlay_off(struct intel_overlay *overlay) +static int intel_overlay_off(struct intel_overlay *overlay, + bool interruptible) { struct drm_device *dev = overlay->dev; u32 flip_addr = overlay->flip_addr; @@ -394,7 +395,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, request, true, + return intel_overlay_do_wait_request(overlay, request, interruptible, SWITCH_OFF); } @@ -427,8 +428,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* recover from an interruption due to a signal * We have to be careful not to 
repeat work forever an make forward progess. */ -int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - bool interruptible) +static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, + bool interruptible) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -855,17 +856,19 @@ out_unpin: return ret; } -int intel_overlay_switch_off(struct intel_overlay *overlay) +int intel_overlay_switch_off(struct intel_overlay *overlay, + bool interruptible) { - int ret; struct overlay_registers *regs; struct drm_device *dev = overlay->dev; + int ret; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); if (overlay->hw_wedged) { - ret = intel_overlay_recover_from_interrupt(overlay, 1); + ret = intel_overlay_recover_from_interrupt(overlay, + interruptible); if (ret != 0) return ret; } @@ -881,7 +884,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) regs->OCMD = 0; intel_overlay_unmap_regs(overlay, regs); - ret = intel_overlay_off(overlay); + ret = intel_overlay_off(overlay, interruptible); if (ret != 0) return ret; @@ -1097,7 +1100,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); - ret = intel_overlay_switch_off(overlay); + ret = intel_overlay_switch_off(overlay, true); mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); @@ -1135,7 +1138,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, if (overlay->crtc != crtc) { struct drm_display_mode *mode = &crtc->base.mode; - ret = intel_overlay_switch_off(overlay); + ret = intel_overlay_switch_off(overlay, true); if (ret != 0) goto out_unlock; From 23f09ce31ca68af3728ac5eed3e3efb03c5f990a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 13:53:37 +0100 Subject: [PATCH 064/476] drm/i915/overlay: Make the overlay control struct opaque. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- drivers/gpu/drm/i915/intel_drv.h | 24 ------------------------ drivers/gpu/drm/i915/intel_overlay.c | 23 +++++++++++++++++++++++ 3 files changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e4fb5364a533..0b400d1d2fe1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2224,11 +2224,11 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { if (!enable && intel_crtc->overlay) { - struct intel_overlay *overlay = intel_crtc->overlay; + struct drm_device *dev = intel_crtc->base.dev; - mutex_lock(&overlay->dev->struct_mutex); - (void) intel_overlay_switch_off(overlay, false); - mutex_unlock(&overlay->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); + (void) intel_overlay_switch_off(intel_crtc->overlay, false); + mutex_unlock(&dev->struct_mutex); } /* Let userspace switch the overlay on again. 
In most cases userspace diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 44744537a0cf..949cfda4b49a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -146,30 +146,6 @@ struct intel_connector { struct drm_connector base; }; -struct intel_crtc; -struct intel_overlay { - struct drm_device *dev; - struct intel_crtc *crtc; - struct drm_i915_gem_object *vid_bo; - struct drm_i915_gem_object *old_vid_bo; - int active; - int pfit_active; - u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ - u32 color_key; - u32 brightness, contrast, saturation; - u32 old_xscale, old_yscale; - /* register access */ - u32 flip_addr; - struct drm_i915_gem_object *reg_bo; - /* flip handling */ - uint32_t last_flip_req; - int hw_wedged; -#define HW_WEDGED 1 -#define NEEDS_WAIT_FOR_FLIP 2 -#define RELEASE_OLD_VID 3 -#define SWITCH_OFF 4 -}; - struct intel_crtc { struct drm_crtc base; enum pipe pipe; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3533355c5ea3..0a7d3e688060 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -170,6 +170,29 @@ struct overlay_registers { u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; }; +struct intel_overlay { + struct drm_device *dev; + struct intel_crtc *crtc; + struct drm_i915_gem_object *vid_bo; + struct drm_i915_gem_object *old_vid_bo; + int active; + int pfit_active; + u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ + u32 color_key; + u32 brightness, contrast, saturation; + u32 old_xscale, old_yscale; + /* register access */ + u32 flip_addr; + struct drm_i915_gem_object *reg_bo; + /* flip handling */ + uint32_t last_flip_req; + int hw_wedged; +#define HW_WEDGED 1 +#define NEEDS_WAIT_FOR_FLIP 2 +#define RELEASE_OLD_VID 3 +#define SWITCH_OFF 4 +}; + static struct overlay_registers * intel_overlay_map_regs_atomic(struct intel_overlay *overlay, int slot) From b303cf9542b016e2af3b9d17255a7f93cd790ef5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Aug 2010 14:03:48 +0100 Subject: [PATCH 065/476] drm/i915/overlay: Use a continuation hook to finish work after a flip. Slightly easier to follow than the state machine and now possible as the control structure is opaque and hw_wedged is no longer interferred with. 
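The gist of the change, as a standalone sketch (all names invented, not the driver's symbols): instead of recording an integer stage and switching on it after the wait, record the function to run and call it.

#include <stddef.h>
#include <stdio.h>

struct overlay {
	int old_frame;                        /* pretend resource to release */
	void (*flip_tail)(struct overlay *);  /* continuation hook */
};

static void release_old_frame_tail(struct overlay *o)
{
	printf("releasing old frame %d\n", o->old_frame);
	o->old_frame = 0;
}

/* Queue work and remember what to do when it finishes. */
static void start_flip(struct overlay *o, void (*tail)(struct overlay *))
{
	o->flip_tail = tail;
}

/* Runs after the wait (or after recovering from a signal): no state
 * machine, just invoke whatever continuation was recorded. */
static void finish_flip(struct overlay *o)
{
	if (o->flip_tail)
		o->flip_tail(o);
	o->flip_tail = NULL;
}

int main(void)
{
	struct overlay o = { .old_frame = 42 };

	start_flip(&o, release_old_frame_tail);
	finish_flip(&o);        /* runs the recorded tail exactly once */
	finish_flip(&o);        /* idempotent: tail already cleared */
	return 0;
}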
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_overlay.c | 110 +++++++++++---------------- 1 file changed, 44 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 0a7d3e688060..bb2256f9dbc8 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -186,11 +186,7 @@ struct intel_overlay { struct drm_i915_gem_object *reg_bo; /* flip handling */ uint32_t last_flip_req; - int hw_wedged; -#define HW_WEDGED 1 -#define NEEDS_WAIT_FOR_FLIP 2 -#define RELEASE_OLD_VID 3 -#define SWITCH_OFF 4 + void (*flip_tail)(struct intel_overlay *); }; static struct overlay_registers * @@ -243,25 +239,25 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, static int intel_overlay_do_wait_request(struct intel_overlay *overlay, struct drm_i915_gem_request *request, bool interruptible, - int stage) + void (*tail)(struct intel_overlay *)) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; int ret; + BUG_ON(overlay->last_flip_req); overlay->last_flip_req = i915_add_request(dev, NULL, request, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - overlay->hw_wedged = stage; + overlay->flip_tail = tail; ret = i915_do_wait_request(dev, overlay->last_flip_req, true, &dev_priv->render_ring); if (ret) return ret; - overlay->hw_wedged = 0; overlay->last_flip_req = 0; return 0; } @@ -344,8 +340,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - ret = intel_overlay_do_wait_request(overlay, request, true, - NEEDS_WAIT_FOR_FLIP); + ret = intel_overlay_do_wait_request(overlay, request, true, NULL); out: if (pipe_a_quirk) i830_deactivate_pipe_a(dev); @@ -387,6 +382,33 @@ static int intel_overlay_continue(struct intel_overlay *overlay, return 0; } +static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) +{ + struct drm_gem_object *obj = &overlay->old_vid_bo->base; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + + overlay->old_vid_bo = NULL; +} + +static void intel_overlay_off_tail(struct intel_overlay *overlay) +{ + struct drm_gem_object *obj; + + /* never have the overlay hw on without showing a frame */ + BUG_ON(!overlay->vid_bo); + obj = &overlay->vid_bo->base; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->vid_bo = NULL; + + overlay->crtc->overlay = NULL; + overlay->crtc = NULL; + overlay->active = 0; +} + /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay, bool interruptible) @@ -419,34 +441,7 @@ static int intel_overlay_off(struct intel_overlay *overlay, ADVANCE_LP_RING(); return intel_overlay_do_wait_request(overlay, request, interruptible, - SWITCH_OFF); -} - -static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) -{ - struct drm_gem_object *obj = &overlay->old_vid_bo->base; - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - - overlay->old_vid_bo = NULL; -} - -static void intel_overlay_off_tail(struct intel_overlay *overlay) -{ - struct drm_gem_object *obj; - - /* never have the overlay hw on without showing a frame */ - BUG_ON(!overlay->vid_bo); - obj = &overlay->vid_bo->base; - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->vid_bo = NULL; - - overlay->crtc->overlay = NULL; - overlay->crtc = NULL; - overlay->active = 0; + intel_overlay_off_tail); } 
/* recover from an interruption due to a signal @@ -458,28 +453,17 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, drm_i915_private_t *dev_priv = dev->dev_private; int ret; - if (overlay->hw_wedged == HW_WEDGED) - return -EIO; + if (overlay->last_flip_req == 0) + return 0; ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible, &dev_priv->render_ring); if (ret) return ret; - switch (overlay->hw_wedged) { - case RELEASE_OLD_VID: - intel_overlay_release_old_vid_tail(overlay); - break; + if (overlay->flip_tail) + overlay->flip_tail(overlay); - case SWITCH_OFF: - intel_overlay_off_tail(overlay); - break; - - default: - BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); - } - - overlay->hw_wedged = 0; overlay->last_flip_req = 0; return 0; } @@ -514,7 +498,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) ADVANCE_LP_RING(); ret = intel_overlay_do_wait_request(overlay, request, true, - RELEASE_OLD_VID); + intel_overlay_release_old_vid_tail); if (ret) return ret; } @@ -889,12 +873,9 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); - if (overlay->hw_wedged) { - ret = intel_overlay_recover_from_interrupt(overlay, - interruptible); - if (ret != 0) - return ret; - } + ret = intel_overlay_recover_from_interrupt(overlay, interruptible); + if (ret != 0) + return ret; if (!overlay->active) return 0; @@ -912,7 +893,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, return ret; intel_overlay_off_tail(overlay); - return 0; } @@ -1153,11 +1133,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); - if (overlay->hw_wedged) { - ret = intel_overlay_recover_from_interrupt(overlay, 1); - if (ret != 0) - goto out_unlock; - } + ret = intel_overlay_recover_from_interrupt(overlay, true); + if (ret != 0) + goto out_unlock; if (overlay->crtc != crtc) { struct drm_display_mode *mode = &crtc->base.mode; From 3bd3c9329973a93fa3ef5e9840f2fd6fa2889e3f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Aug 2010 08:19:30 +0100 Subject: [PATCH 066/476] drm/i915: Compile out error state without DEBUG_FS Alexander reported that the compilation of intel_overlay.c was failing due to an inclusion that was only valid with CONFIG_DEBUG_FS. As the whole error reporting is only useful with debugfs enabled, remove all the redundant error state collection code when compiling without CONFIG_DEBUG_FS. 
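The compile-out follows the usual kernel pattern: a real definition under the config option and a no-op stub otherwise, so the call sites stay unconditional. A small standalone sketch (illustrative names; the stub here uses do { } while (0) rather than the bare macro in the patch):

#include <stdio.h>

/* #define CONFIG_DEBUG_FS 1 */      /* flip this to enable the real code */

#ifdef CONFIG_DEBUG_FS
static void capture_error_state(int code)
{
	printf("captured error state %d\n", code);
}
#else
#define capture_error_state(code) do { } while (0)
#endif

int main(void)
{
	/* Compiles either way; with the option off this expands to nothing. */
	capture_error_state(7);
	return 0;
}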
Reported-by: Alexander Lam Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 9 ++++- drivers/gpu/drm/i915/i915_irq.c | 4 ++ drivers/gpu/drm/i915/intel_overlay.c | 55 +++++++++++++++------------- 3 files changed, 42 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 101607391c4f..634e1c463dec 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -874,7 +874,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); -void i915_destroy_error_state(struct drm_device *dev); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, @@ -911,6 +910,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void intel_enable_asle (struct drm_device *dev); +#ifdef CONFIG_DEBUG_FS +extern void i915_destroy_error_state(struct drm_device *dev); +#else +#define i915_destroy_error_state(x) +#endif + /* i915_mem.c */ extern int i915_mem_alloc(struct drm_device *dev, void *data, @@ -1091,8 +1096,10 @@ extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); /* overlay */ +#ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); +#endif /** * Lock test for when it's just for synchronization of ring access. diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e797157f4bb9..080ea3b162cd 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -421,6 +421,7 @@ static void i915_error_work_func(struct work_struct *work) } } +#ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, struct drm_gem_object *src) @@ -744,6 +745,9 @@ void i915_destroy_error_state(struct drm_device *dev) if (error) i915_error_state_free(dev, error); } +#else +#define i915_capture_error_state(x) +#endif static void i915_report_and_clear_eir(struct drm_device *dev) { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index bb2256f9dbc8..743ced7c4ae7 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -189,31 +189,6 @@ struct intel_overlay { void (*flip_tail)(struct intel_overlay *); }; -static struct overlay_registers * -intel_overlay_map_regs_atomic(struct intel_overlay *overlay, - int slot) -{ - drm_i915_private_t *dev_priv = overlay->dev->dev_private; - struct overlay_registers *regs; - - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - regs = overlay->reg_bo->phys_obj->handle->vaddr; - else - regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - overlay->reg_bo->gtt_offset, - slot); - - return regs; -} - -static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, - int slot, - struct overlay_registers *regs) -{ - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(regs, slot); -} - static struct overlay_registers * intel_overlay_map_regs(struct intel_overlay *overlay) { @@ -1454,6 +1429,9 @@ void intel_cleanup_overlay(struct drm_device *dev) kfree(dev_priv->overlay); } +#ifdef CONFIG_DEBUG_FS +#include + struct intel_overlay_error_state { struct overlay_registers regs; unsigned long 
base; @@ -1461,6 +1439,32 @@ struct intel_overlay_error_state { u32 isr; }; +static struct overlay_registers * +intel_overlay_map_regs_atomic(struct intel_overlay *overlay, + int slot) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers *regs; + + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + regs = overlay->reg_bo->phys_obj->handle->vaddr; + else + regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + overlay->reg_bo->gtt_offset, + slot); + + return regs; +} + +static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, + int slot, + struct overlay_registers *regs) +{ + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap_atomic(regs, slot); +} + + struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_device *dev) { @@ -1549,3 +1553,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s P(UVSCALEV); #undef P } +#endif From 015b9c8ce50e5bfb7ea78613dcad4b30d1a0d9da Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Aug 2010 11:26:59 +0100 Subject: [PATCH 067/476] drm/i915: Remove the random SyncFlush during initialisation We have no idea why we request a SyncFlush via INSTPM at that point in time -- we certainly never check for its completion... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dae3be9ebef..315326d5dc22 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1444,12 +1444,6 @@ static int i915_load_modeset_init(struct drm_device *dev, /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; - /* - * Initialize the hardware status page IRQ location. - */ - - I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - ret = intel_fbdev_init(dev); if (ret) goto cleanup_irq; From de227ef0907258359d53e3e1530c1f3678eb2bb9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 3 Jul 2010 07:58:38 +0100 Subject: [PATCH 068/476] drm/i915: Kill the active list spinlock This spinlock only served debugging purposes in a time when we could not be sure of the mutex ever being released upon a GPU hang. As we now should be able rely on hangcheck to do the job for us (and that error reporting should not itself require the struct mutex) we can kill the incomplete attempt at protection. 
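With the spinlock gone, the debugfs readers simply take the struct mutex up front, bail out early if they cannot, and drop it on every exit path. A userspace stand-in for that shape (pthread_mutex_trylock playing the role of mutex_lock_interruptible; names are illustrative; build with cc -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_objects = 3;       /* pretend driver state */

static int dump_active_list(void)
{
	int ret = pthread_mutex_trylock(&state_lock);
	if (ret)
		return -ret;                 /* e.g. -EBUSY: caller may retry */

	for (int i = 0; i < active_objects; i++)
		printf("active object %d\n", i);

	pthread_mutex_unlock(&state_lock);
	return 0;
}

int main(void)
{
	return dump_active_list() ? 1 : 0;
}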
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 89 +++++++++++++++++++-------- drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/i915_gem.c | 35 +---------- drivers/gpu/drm/i915/i915_gem_evict.c | 5 -- 4 files changed, 65 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 16133f10ffaa..9074300fed8d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -72,12 +72,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - spinlock_t *lock = NULL; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; switch (list) { case ACTIVE_LIST: seq_printf(m, "Active:\n"); - lock = &dev_priv->mm.active_list_lock; head = &dev_priv->render_ring.active_list; break; case INACTIVE_LIST: @@ -89,14 +92,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) head = &dev_priv->mm.flushing_list; break; default: - DRM_INFO("Ooops, unexpected list\n"); - return 0; + mutex_unlock(&dev->struct_mutex); + return -EINVAL; } - if (lock) - spin_lock(lock); - list_for_each_entry(obj_priv, head, list) - { + list_for_each_entry(obj_priv, head, list) { seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", &obj_priv->base, get_pin_flag(obj_priv), @@ -117,8 +117,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, "\n"); } - if (lock) - spin_unlock(lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -176,6 +175,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_request *gem_request; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "Request:\n"); list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, @@ -184,6 +188,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data) gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -192,6 +198,11 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", @@ -202,6 +213,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) seq_printf(m, "Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno); seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -211,6 +225,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (!HAS_PCH_SPLIT(dev)) { seq_printf(m, "Interrupt enable: %08x\n", @@ -255,6 +274,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) dev_priv->mm.waiting_gem_seqno); seq_printf(m, "IRQ sequence: %d\n", 
dev_priv->mm.irq_gem_seqno); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -263,7 +284,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int i; + int i, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); @@ -289,6 +314,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "\n"); } } + mutex_unlock(&dev->struct_mutex); return 0; } @@ -319,10 +345,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co uint32_t *mem; for (page = 0; page < page_count; page++) { - mem = kmap_atomic(pages[page], KM_USER0); + mem = kmap(pages[page]); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); - kunmap_atomic(mem, KM_USER0); + kunmap(pages[page]); } } @@ -335,7 +361,9 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) struct drm_i915_gem_object *obj_priv; int ret; - spin_lock(&dev_priv->mm.active_list_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) { @@ -343,8 +371,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { ret = i915_gem_object_get_pages(obj, 0); if (ret) { - DRM_ERROR("Failed to get pages: %d\n", ret); - spin_unlock(&dev_priv->mm.active_list_lock); + mutex_unlock(&dev->struct_mutex); return ret; } @@ -355,7 +382,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) } } - spin_unlock(&dev_priv->mm.active_list_lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -365,20 +392,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u8 *virt; - uint32_t *ptr, off; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (!dev_priv->render_ring.gem_object) { seq_printf(m, "No ringbuffer setup\n"); - return 0; - } + } else { + u8 *virt = dev_priv->render_ring.virtual_start; + uint32_t off; - virt = dev_priv->render_ring.virtual_start; - - for (off = 0; off < dev_priv->render_ring.size; off += 4) { - ptr = (uint32_t *)(virt + off); - seq_printf(m, "%08x : %08x\n", off, *ptr); + for (off = 0; off < dev_priv->render_ring.size; off += 4) { + uint32_t *ptr = (uint32_t *)(virt + off); + seq_printf(m, "%08x : %08x\n", off, *ptr); + } } + mutex_unlock(&dev->struct_mutex); return 0; } @@ -694,10 +725,16 @@ static int i915_emon_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; unsigned long temp, chipset, gfx; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); gfx = i915_gfx_val(dev_priv); + mutex_unlock(&dev->struct_mutex); seq_printf(m, "GMCH temp: %ld\n", temp); seq_printf(m, "Chipset power: %ld\n", chipset); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 634e1c463dec..e6fbeb43d59c 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -524,8 +524,6 @@ typedef struct drm_i915_private { */ struct list_head shrink_list; - spinlock_t active_list_lock; - /** * List of objects which are not in the ringbuffer but which * still have a write_domain which needs to be flushed before diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index afe4a9b0a03d..b6e4b60724ec 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1486,7 +1486,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t seqno = i915_gem_next_request_seqno(dev, ring); @@ -1500,9 +1499,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, } /* Move from whatever list we were on to the tail of execution. */ - spin_lock(&dev_priv->mm.active_list_lock); list_move_tail(&obj_priv->list, &ring->active_list); - spin_unlock(&dev_priv->mm.active_list_lock); obj_priv->last_rendering_seqno = seqno; } @@ -1676,14 +1673,11 @@ static void i915_gem_retire_request(struct drm_device *dev, struct drm_i915_gem_request *request) { - drm_i915_private_t *dev_priv = dev->dev_private; - trace_i915_gem_request_retire(dev, request->seqno); /* Move any buffers on the active list that are no longer referenced * by the ringbuffer to the flushing/inactive lists as appropriate. */ - spin_lock(&dev_priv->mm.active_list_lock); while (!list_empty(&request->ring->active_list)) { struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -1698,7 +1692,7 @@ i915_gem_retire_request(struct drm_device *dev, * this seqno. */ if (obj_priv->last_rendering_seqno != request->seqno) - goto out; + return; #if WATCH_LRU DRM_INFO("%s: retire %d moves to inactive list %p\n", @@ -1707,22 +1701,9 @@ i915_gem_retire_request(struct drm_device *dev, if (obj->write_domain != 0) i915_gem_object_move_to_flushing(obj); - else { - /* Take a reference on the object so it won't be - * freed while the spinlock is held. The list - * protection for this spinlock is safe when breaking - * the lock like this since the next thing we do - * is just get the head of the list again. - */ - drm_gem_object_reference(obj); + else i915_gem_object_move_to_inactive(obj); - spin_unlock(&dev_priv->mm.active_list_lock); - drm_gem_object_unreference(obj); - spin_lock(&dev_priv->mm.active_list_lock); - } } -out: - spin_unlock(&dev_priv->mm.active_list_lock); } /** @@ -1972,7 +1953,6 @@ int i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; @@ -2027,10 +2007,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj) } /* Remove ourselves from the LRU list if present. 
*/ - spin_lock(&dev_priv->mm.active_list_lock); if (!list_empty(&obj_priv->list)) list_del_init(&obj_priv->list); - spin_unlock(&dev_priv->mm.active_list_lock); if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2047,13 +2025,10 @@ i915_gpu_idle(struct drm_device *dev) bool lists_empty; int ret; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - if (lists_empty) return 0; @@ -4550,11 +4525,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } - spin_lock(&dev_priv->mm.active_list_lock); BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); - spin_unlock(&dev_priv->mm.active_list_lock); - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); @@ -4606,7 +4578,6 @@ i915_gem_load(struct drm_device *dev) int i; drm_i915_private_t *dev_priv = dev->dev_private; - spin_lock_init(&dev_priv->mm.active_list_lock); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); @@ -4862,12 +4833,10 @@ i915_gpu_is_active(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int lists_empty; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list); if (HAS_BSD(dev)) lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); - spin_unlock(&dev_priv->mm.active_list_lock); return !lists_empty; } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 72cae3cccad8..82430e21c7ab 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -212,14 +212,11 @@ i915_gem_evict_everything(struct drm_device *dev) int ret; bool lists_empty; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - if (lists_empty) return -ENOSPC; @@ -234,13 +231,11 @@ i915_gem_evict_everything(struct drm_device *dev) if (ret) return ret; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); BUG_ON(!lists_empty); return 0; From 995b6762f0fd54377bbfafdf5328b12de698bfa8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Aug 2010 13:23:26 +0100 Subject: [PATCH 069/476] drm/i915: Quieten sparse warnings for missing prototypes. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 6 +++--- drivers/gpu/drm/i915/i915_irq.c | 6 +++--- drivers/gpu/drm/i915/intel_dp.c | 2 +- drivers/gpu/drm/i915/intel_fb.c | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 315326d5dc22..c52e16fe3d8c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1902,7 +1902,7 @@ static struct drm_i915_private *i915_mch_dev; * - dev_priv->fmax * - dev_priv->gpu_busy */ -DEFINE_SPINLOCK(mchdev_lock); +static DEFINE_SPINLOCK(mchdev_lock); /** * i915_read_mch_val - return value for IPS use diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b6e4b60724ec..e0b7ddc917c2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4635,8 +4635,8 @@ i915_gem_load(struct drm_device *dev) * Create a physically contiguous memory object for this object * e.g. for cursor + overlay regs */ -int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size, int align) +static int i915_gem_init_phys_object(struct drm_device *dev, + int id, int size, int align) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; @@ -4668,7 +4668,7 @@ kfree_obj: return ret; } -void i915_gem_free_phys_object(struct drm_device *dev, int id) +static void i915_gem_free_phys_object(struct drm_device *dev, int id) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 080ea3b162cd..29215b6d5650 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) } /* For display hotplug interrupt */ -void +static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask_reg & mask) != 0) { @@ -305,7 +305,7 @@ static void i915_handle_rps_change(struct drm_device *dev) return; } -irqreturn_t ironlake_irq_handler(struct drm_device *dev) +static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; @@ -1315,7 +1315,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -struct drm_i915_gem_request * +static struct drm_i915_gem_request * i915_get_tail_request(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3449a3b89e7f..eb6e6763ff56 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1513,7 +1513,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; -void +static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7bdc96256bf5..42f8a512815a 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -219,8 +219,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intel_fb_find_or_create_single, }; -int intel_fbdev_destroy(struct drm_device *dev, - struct intel_fbdev *ifbdev) +static int 
intel_fbdev_destroy(struct drm_device *dev, + struct intel_fbdev *ifbdev) { struct fb_info *info; struct intel_framebuffer *ifb = &ifbdev->ifb; From 5d607f9b038ea03f5e5b3064d2f3993f9ea67e1e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Aug 2010 22:37:03 +0100 Subject: [PATCH 070/476] drm/i915: Remove redundant initialisation of fb_base We do it whilst configuring dev->mode_config, so remove the out-of-place earlier initialisation. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c52e16fe3d8c..8d52f01a6d90 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1369,12 +1369,8 @@ static int i915_load_modeset_init(struct drm_device *dev, unsigned long agp_size) { struct drm_i915_private *dev_priv = dev->dev_private; - int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; - dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & - 0xff000000; - /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); From a95735569312f2ab0c80425e2cd1e5cb0b4e1870 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 22 Aug 2010 13:18:16 +0100 Subject: [PATCH 071/476] drm/i915: Refactor panel backlight controls There were two instances of code to control the panel backlight and neither handled the complete set of device variations. Fixes: Bug 29716 - [GM965] Regression: Backlight resets to minimum when changing resolution https://bugs.freedesktop.org/show_bug.cgi?id=29716 And a bug on one of my PineView boxes which overflowed the backlight value. Incorporates part of a similar patch by Matthew Garrett that exposes a native Intel backlight controller. 
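The refactor boils down to one pair of helpers shared by every caller: one reads the hardware maximum, the other scales and clamps the requested level so it can no longer overflow the duty-cycle field. A standalone sketch with a made-up register model (not the real BLC_PWM layout):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_max = 0x1fff;     /* pretend PWM modulation frequency */

static uint32_t panel_get_max_backlight(void)
{
	return hw_max;
}

static void panel_set_backlight(uint32_t level)
{
	uint32_t max = panel_get_max_backlight();

	if (level > max)
		level = max;                 /* clamp instead of overflowing */
	printf("duty cycle = %u / %u\n", level, max);
}

int main(void)
{
	uint32_t bclp = 200;             /* 0..255 request, e.g. from ASLE */

	panel_set_backlight(bclp * panel_get_max_backlight() / 255);
	return 0;
}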
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_drv.h | 5 +- drivers/gpu/drm/i915/intel_lvds.c | 56 ++----------- drivers/gpu/drm/i915/intel_opregion.c | 61 ++------------ drivers/gpu/drm/i915/intel_panel.c | 109 ++++++++++++++++++++++++++ 5 files changed, 125 insertions(+), 108 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e6fbeb43d59c..cfc8bfd0fd7e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -319,7 +319,7 @@ typedef struct drm_i915_private { struct intel_overlay *overlay; /* LVDS info */ - int backlight_duty_cycle; /* restore backlight to this value */ + int backlight_level; /* restore backlight to this value */ bool panel_wants_dither; struct drm_display_mode *panel_fixed_mode; struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 949cfda4b49a..cdf9c78896cf 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -204,13 +204,16 @@ extern bool intel_pch_has_edp(struct drm_crtc *crtc); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); - +/* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); +extern u32 intel_panel_get_max_backlight(struct drm_device *dev); +extern u32 intel_panel_get_backlight(struct drm_device *dev); +extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_fitter_pipe (struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 047bd9538c6a..8320279fad58 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -53,43 +53,6 @@ static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); } -/** - * Sets the backlight level. - * - * \param level backlight level, from 0 to intel_lvds_get_max_backlight(). - */ -static void intel_lvds_set_backlight(struct drm_device *dev, int level) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 blc_pwm_ctl, reg; - - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_CPU_CTL; - else - reg = BLC_PWM_CTL; - - blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(reg, (blc_pwm_ctl | - (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); -} - -/** - * Returns the maximum level of the backlight duty cycle field. - */ -static u32 intel_lvds_get_max_backlight(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; - - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_PCH_CTL2; - else - reg = BLC_PWM_CTL; - - return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >> - BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; -} - /** * Sets the power state for the panel. 
*/ @@ -117,9 +80,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) if (wait_for(I915_READ(status_reg) & PP_ON, 1000)) DRM_ERROR("timed out waiting to enable LVDS pipe"); - intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); + intel_panel_set_backlight(dev, dev_priv->backlight_level); } else { - intel_lvds_set_backlight(dev, 0); + intel_panel_set_backlight(dev, 0); I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); @@ -386,16 +349,8 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_CPU_CTL; - else - reg = BLC_PWM_CTL; - - dev_priv->saveBLC_PWM_CTL = I915_READ(reg); - dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & - BACKLIGHT_DUTY_CYCLE_MASK); + dev_priv->backlight_level = intel_panel_get_backlight(dev); intel_lvds_set_power(dev, false); } @@ -405,9 +360,8 @@ static void intel_lvds_commit( struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->backlight_duty_cycle == 0) - dev_priv->backlight_duty_cycle = - intel_lvds_get_max_backlight(dev); + if (dev_priv->backlight_level == 0) + dev_priv->backlight_level = intel_panel_get_max_backlight(dev); intel_lvds_set_power(dev, true); } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 3cb13237ba58..917c7dc3cd6b 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -31,9 +31,9 @@ #include "drmP.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_drv.h" #define PCI_ASLE 0xe4 -#define PCI_LBPC 0xf4 #define PCI_ASLS 0xfc #define OPREGION_HEADER_OFFSET 0 @@ -147,36 +147,17 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; - u32 blc_pwm_ctl, blc_pwm_ctl2; - u32 max_backlight, level, shift; + u32 max; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; - if (bclp < 0 || bclp > 255) + if (bclp > 255) return ASLE_BACKLIGHT_FAILED; - blc_pwm_ctl = I915_READ(BLC_PWM_CTL); - blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); - - if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) - pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); - else { - if (IS_PINEVIEW(dev)) { - blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); - max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> - BACKLIGHT_MODULATION_FREQ_SHIFT; - shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; - } else { - blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; - max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> - BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; - shift = BACKLIGHT_DUTY_CYCLE_SHIFT; - } - level = (bclp * max_backlight) / 255; - I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); - } + max = intel_panel_get_max_backlight(dev); + intel_panel_set_backlight(dev, bclp * max / 255); asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; @@ -243,36 +224,6 @@ void intel_opregion_asle_intr(struct drm_device *dev) asle->aslc = asle_stat; } -static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct opregion_asle *asle = dev_priv->opregion.asle; - u32 cpu_pwm_ctl, pch_pwm_ctl2; - u32 max_backlight, level; - - if (!(bclp & ASLE_BCLP_VALID)) - return 
ASLE_BACKLIGHT_FAILED; - - bclp &= ASLE_BCLP_MSK; - if (bclp < 0 || bclp > 255) - return ASLE_BACKLIGHT_FAILED; - - cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL); - pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); - /* get the max PWM frequency */ - max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK; - /* calculate the expected PMW frequency */ - level = (bclp * max_backlight) / 255; - /* reserve the high 16 bits */ - cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK); - /* write the updated PWM frequency */ - I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level); - - asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; - - return 0; -} - /* Only present on Ironlake+ */ void intel_opregion_gse_intr(struct drm_device *dev) { @@ -297,7 +248,7 @@ void intel_opregion_gse_intr(struct drm_device *dev) } if (asle_req & ASLE_SET_BACKLIGHT) - asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); + asle_stat |= asle_set_backlight(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) { DRM_DEBUG_DRIVER("Pfit is not supported\n"); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e7f5299d9d57..30abe7afc942 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -30,6 +30,8 @@ #include "intel_drv.h" +#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ + void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -109,3 +111,110 @@ done: dev_priv->pch_pf_pos = (x << 16) | y; dev_priv->pch_pf_size = (width << 16) | height; } + +static int is_backlight_combination_mode(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_I965G(dev)) + return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; + + if (IS_GEN2(dev)) + return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; + + return 0; +} + +u32 intel_panel_get_max_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 max; + + if (HAS_PCH_SPLIT(dev)) { + max = I915_READ(BLC_PWM_PCH_CTL2) >> 16; + } else { + max = I915_READ(BLC_PWM_CTL); + if (IS_PINEVIEW(dev)) { + max >>= 17; + } else { + max >>= 16; + if (!IS_I965G(dev)) + max &= ~1; + } + + if (is_backlight_combination_mode(dev)) + max *= 0xff; + } + + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and program max PWM appropriately. 
+ */ + DRM_ERROR("fixme: max PWM is zero.\n"); + max = 1; + } + + DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); + return max; +} + +u32 intel_panel_get_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + if (HAS_PCH_SPLIT(dev)) { + val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + } else { + val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + if (IS_PINEVIEW(dev)) + val >>= 1; + + if (is_backlight_combination_mode(dev)){ + u8 lbpc; + + val &= ~1; + pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); + val *= lbpc; + val >>= 1; + } + } + + DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); + return val; +} + +static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CPU_CTL, val | level); +} + +void intel_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + + if (HAS_PCH_SPLIT(dev)) + return intel_pch_panel_set_backlight(dev, level); + + if (is_backlight_combination_mode(dev)){ + u32 max = intel_panel_get_max_backlight(dev); + u8 lpbc; + + lpbc = level * 0xfe / max + 1; + level /= lpbc; + pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); + } + + tmp = I915_READ(BLC_PWM_CTL); + if (IS_PINEVIEW(dev)) { + tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); + level <<= 1; + } else + tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CTL, tmp | level); +} From b7ac36dadafa69214faa75a34844d56bd0c14e89 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 24 Aug 2010 16:07:16 +0100 Subject: [PATCH 072/476] drm/i915/tv: After disabling the pipe, use wait_for_vblank_off() Hopefully this is a contributing factor to the spurious TV detection repoted by Ivan Bulatovic and others. References: Bug 16871 - "TV1 connected" with no tv https://bugzilla.kernel.org/show_bug.cgi?id=16871 Signed-off-by: Chris Wilson Reported-by: Ivan Bulatovic Cc: Jesse Barnes --- drivers/gpu/drm/i915/intel_tv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 4a6534239fa3..d4066729f27b 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1164,7 +1164,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_wait_for_vblank_off(dev, intel_crtc->pipe); /* Filter ctl must be set before TV_WIN_SIZE */ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); From 70d39fe4862c6c69c2582c829ec240e05bf24430 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Aug 2010 16:03:34 +0100 Subject: [PATCH 073/476] drm/i915: Show device capabilities in debugfs Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9074300fed8d..18fd1532cecf 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -44,6 +44,46 @@ #define FLUSHING_LIST 2 #define INACTIVE_LIST 3 +static const char *yesno(int v) +{ + return v ? 
"yes" : "no"; +} + +static int i915_capabilities(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + const struct intel_device_info *info = INTEL_INFO(dev); + + seq_printf(m, "gen: %d\n", info->gen); +#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) + B(is_mobile); + B(is_i8xx); + B(is_i85x); + B(is_i915g); + B(is_i9xx); + B(is_i945gm); + B(is_i965g); + B(is_i965gm); + B(is_g33); + B(need_gfx_hws); + B(is_g4x); + B(is_pineview); + B(is_broadwater); + B(is_crestline); + B(is_ironlake); + B(has_fbc); + B(has_rc6); + B(has_pipe_cxsr); + B(has_hotplug); + B(cursor_needs_physical); + B(has_overlay); + B(overlay_needs_physical); +#undef B + + return 0; +} + static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) { if (obj_priv->user_pin_count > 0) @@ -880,6 +920,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) } static struct drm_info_list i915_debugfs_list[] = { + {"i915_capabilities", i915_capabilities, 0, 0}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, From 37811fcc9188f748407646e1157f3ed24ae181a4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Aug 2010 22:45:57 +0100 Subject: [PATCH 074/476] drm/i915: Show framebuffer info in debugfs Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 83 +++++++++++++++++++++++------ drivers/gpu/drm/i915/intel_drv.h | 8 ++- drivers/gpu/drm/i915/intel_fb.c | 7 --- 3 files changed, 73 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 18fd1532cecf..2f3e017d24d6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -104,6 +104,27 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) } } +static void +describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) +{ + seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", + &obj->base, + get_pin_flag(obj), + get_tiling_flag(obj), + obj->base.size, + obj->base.read_domains, + obj->base.write_domain, + obj->last_rendering_seqno, + obj->dirty ? " dirty" : "", + obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); + if (obj->base.name) + seq_printf(m, " (name: %d)", obj->base.name); + if (obj->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", obj->fence_reg); + if (obj->gtt_space != NULL) + seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); +} + static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -137,23 +158,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } list_for_each_entry(obj_priv, head, list) { - seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", - &obj_priv->base, - get_pin_flag(obj_priv), - obj_priv->base.size, - obj_priv->base.read_domains, - obj_priv->base.write_domain, - obj_priv->last_rendering_seqno, - obj_priv->dirty ? " dirty" : "", - obj_priv->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); - - if (obj_priv->base.name) - seq_printf(m, " (name: %d)", obj_priv->base.name); - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", obj_priv->fence_reg); - if (obj_priv->gtt_space != NULL) - seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset); - + seq_printf(m, " "); + describe_obj(m, obj_priv); seq_printf(m, "\n"); } @@ -815,6 +821,48 @@ static int i915_opregion(struct seq_file *m, void *unused) return 0; } +static int i915_gem_framebuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_fbdev *ifbdev; + struct intel_framebuffer *fb; + int ret; + + ret = mutex_lock_interruptible(&dev->mode_config.mutex); + if (ret) + return ret; + + ifbdev = dev_priv->fbdev; + fb = to_intel_framebuffer(ifbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel); + describe_obj(m, to_intel_bo(fb->obj)); + seq_printf(m, "\n"); + + list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { + if (&fb->base == ifbdev->helper.fb) + continue; + + seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel); + describe_obj(m, to_intel_bo(fb->obj)); + seq_printf(m, "\n"); + } + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} + static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -944,6 +992,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_fbc_status", i915_fbc_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, + {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cdf9c78896cf..b454d1a4271e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -30,8 +30,8 @@ #include #include "i915_drv.h" #include "drm_crtc.h" - #include "drm_crtc_helper.h" +#include "drm_fb_helper.h" #define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ @@ -129,6 +129,12 @@ struct intel_framebuffer { struct drm_gem_object *obj; }; +struct intel_fbdev { + struct drm_fb_helper helper; + struct intel_framebuffer ifb; + struct list_head fbdev_list; + struct drm_display_mode *our_mode; +}; struct intel_encoder { struct drm_encoder enc; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 42f8a512815a..0ee4a8c16608 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -44,13 +44,6 @@ #include "i915_drm.h" #include "i915_drv.h" -struct intel_fbdev { - struct drm_fb_helper helper; - struct intel_framebuffer ifb; - struct list_head fbdev_list; - struct drm_display_mode *our_mode; -}; - static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, From 4b60e5cb707aa1d44fd01680296a2caf45dd6fae Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 8 Aug 2010 11:53:53 +0100 Subject: [PATCH 075/476] drm/i915: Clear scanline waits after disabling the pipe. If we disable the pipe and the GPU is currently waiting on a scanline WAIT_FOR_EVENT, the GPU will hang. 
Fortunately, there is a magic bit which we can write on i915+ to break this wait after disabling the pipe. References: Bug 29252 - [Arrandale] Hung WAIT_FOR_EVENT when running rss-glx-skyrocket https://bugs.freedesktop.org/show_bug.cgi?id=29252 Bug 28964 - [i965gm] GPU infinite MI_WAIT_FOR_EVENT while watching video in Totem https://bugs.freedesktop.org/show_bug.cgi?id=28964 and many others. Signed-off-by: Chris Wilson Cc: Jesse Barnes --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_display.c | 31 ++++++++++++++++++++++++++-- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e240de9eed57..5ede5a5c3381 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -295,6 +295,8 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 +#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0b400d1d2fe1..d5cb7bab340c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2344,6 +2344,26 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) } } +/* + * When we disable a pipe, we need to clear any pending scanline wait events + * to avoid hanging the ring, which we assume we are waiting on. + */ +static void intel_clear_scanline_wait(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + if (IS_GEN2(dev)) + /* Can't break the hang on i8xx */ + return; + + tmp = I915_READ(PRB0_CTL); + if (tmp & RING_WAIT) { + I915_WRITE(PRB0_CTL, tmp); + POSTING_READ(PRB0_CTL); + } +} + /** * Sets the power management mode of the pipe and plane. */ @@ -2366,7 +2386,8 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) * with multiple pipes prior to enabling to new pipe. * * When switching off the display, make sure the cursor is - * properly hidden prior to disabling the pipe. + * properly hidden and there are no pending waits prior to + * disabling the pipe. */ if (mode == DRM_MODE_DPMS_ON) intel_update_watermarks(dev); @@ -2377,8 +2398,14 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) if (mode == DRM_MODE_DPMS_ON) intel_crtc_update_cursor(crtc); - else + else { + /* XXX Note that this is not a complete solution, but a hack + * to avoid the most frequently hit hang. + */ + intel_clear_scanline_wait(dev); + intel_update_watermarks(dev); + } if (!dev->primary->master) return; From 8c80b59b370b4d942f595bdb4a6d23494f77a810 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 8 Aug 2010 20:38:12 +0100 Subject: [PATCH 076/476] drm/i915: Add ringbuffer wait reset to hangcheck The GPU records whether it is currently waiting for a completion of a WAIT_FOR_EVENT in the RB_WAIT bit in the ringbuffer control registers. On third generation chipsets and later, a write of 1 to this bit breaks the hang and returns the GPU to arbitration, i.e. the GPU should continue executing the reminder of the batchbuffer and return to normal operations. By adding this to hangcheck we can avoid a full GPU reset under these conditions. 
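The mechanism amounts to a read-modify-write of the ring control register: if the wait bit is set, writing the value back (the bit is write-one-to-clear) releases the GPU. A minimal sketch, assuming a gen3+ part and using the PRB0_CTL/RING_WAIT definitions added in the previous patch (the example_* name is illustrative, not part of the patch):

/* Sketch only: kick the ring out of a hung MI_WAIT_FOR_EVENT. */
static bool example_kick_ring_wait(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(PRB0_CTL);

	if (!(tmp & RING_WAIT))
		return false;	/* not stuck on a scanline wait */

	/* Writing the register back with RING_WAIT still set clears the
	 * wait and returns the ring to arbitration.
	 */
	I915_WRITE(PRB0_CTL, tmp);
	POSTING_READ(PRB0_CTL);
	return true;
}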
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 29215b6d5650..2f7f7cb0bf30 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1373,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 == instdone1) { if (dev_priv->hangcheck_count++ > 1) { DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + + if (!IS_GEN2(dev)) { + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + u32 tmp = I915_READ(PRB0_CTL); + if (tmp & RING_WAIT) { + I915_WRITE(PRB0_CTL, tmp); + POSTING_READ(PRB0_CTL); + goto out; + } + } + i915_handle_error(dev, true); return; } @@ -1384,6 +1399,7 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 = instdone1; } +out: /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); } From dd2575ffbd71d0922eb31b94adc0923f9808c915 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Sep 2010 12:59:16 +0100 Subject: [PATCH 077/476] drm/i915: Remove impossible error handling from bit17 swizzling Our usage of kmap() cannot return NULL here, so remove the unnecessary error handling. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_tiling.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 710eca70b323..3c0859edfdf7 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -399,16 +399,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, * bit 17 of its physical address and therefore being interpreted differently * by the GPU. */ -static int +static void i915_gem_swizzle_page(struct page *page) { + char temp[64]; char *vaddr; int i; - char temp[64]; vaddr = kmap(page); - if (vaddr == NULL) - return -ENOMEM; for (i = 0; i < PAGE_SIZE; i += 128) { memcpy(temp, &vaddr[i], 64); @@ -417,8 +415,6 @@ i915_gem_swizzle_page(struct page *page) } kunmap(page); - - return 0; } void @@ -440,11 +436,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj_priv->bit_17) != 0)) { - int ret = i915_gem_swizzle_page(obj_priv->pages[i]); - if (ret != 0) { - DRM_ERROR("Failed to swizzle page\n"); - return; - } + i915_gem_swizzle_page(obj_priv->pages[i]); set_page_dirty(obj_priv->pages[i]); } } From 4f0d1aff791db8935ee146fe7928b63bba0f1b59 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 7 Sep 2010 14:48:05 -0700 Subject: [PATCH 078/476] drm/i915: fix pipeconf dither bit definitions Make them match the others and add BPP definitions. 
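With the renamed bits, selecting dithering on a pipe becomes a plain read-modify-write; an illustrative fragment following the intel_display.c hunk below (pipeconf and pipeconf_reg are the locals used in the mode-set path, and want_dither is a hypothetical condition):

/* Illustrative fragment: enable spatio-temporal dithering, type 1. */
pipeconf &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (want_dither)
	pipeconf |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_ST1;
I915_WRITE(pipeconf_reg, pipeconf);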
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 16 +++++++++++----- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5ede5a5c3381..d0b4b2375d56 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2078,11 +2078,6 @@ /* Display & cursor control */ -/* dithering flag on Ironlake */ -#define PIPE_ENABLE_DITHER (1 << 4) -#define PIPE_DITHER_TYPE_MASK (3 << 2) -#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) -#define PIPE_DITHER_TYPE_ST01 (1 << 2) /* Pipe A */ #define PIPEADSL 0x70000 #define DSL_LINEMASK 0x00000fff @@ -2101,6 +2096,17 @@ #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) #define PIPECONF_CXSR_DOWNCLOCK (1<<16) +#define PIPECONF_BPP_MASK (0x000000e0) +#define PIPECONF_BPP_8 (0<<5) +#define PIPECONF_BPP_10 (1<<5) +#define PIPECONF_BPP_6 (2<<5) +#define PIPECONF_BPP_12 (3<<5) +#define PIPECONF_DITHER_EN (1<<4) +#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) +#define PIPECONF_DITHER_TYPE_SP (0<<2) +#define PIPECONF_DITHER_TYPE_ST1 (1<<2) +#define PIPECONF_DITHER_TYPE_ST2 (2<<2) +#define PIPECONF_DITHER_TYPE_TEMP (3<<2) #define PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d5cb7bab340c..948a3608d1bd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3910,8 +3910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; + pipeconf &= ~PIPECONF_DITHER_EN; + pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; } /* The LVDS pin pair needs to be on before the DPLLs are enabled. @@ -3955,8 +3955,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (IS_I965G(dev)) { if (dev_priv->lvds_dither) { if (HAS_PCH_SPLIT(dev)) { - pipeconf |= PIPE_ENABLE_DITHER; - pipeconf |= PIPE_DITHER_TYPE_ST01; + pipeconf |= PIPECONF_DITHER_EN; + pipeconf |= PIPECONF_DITHER_TYPE_ST1; } else lvds |= LVDS_ENABLE_DITHER; } else { From 434ed097245423c5ea277d18121c0fad0df42abf Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 7 Sep 2010 14:48:06 -0700 Subject: [PATCH 079/476] drm/i915: set dither bits on eDP panels too We really need a macro to test whether a given connector has a panel attached rather than sprinkling HAS_PCH_SPLIT/IS_eDP/has_edp_encoder etc all over. In the meantime, fix the bug... Signed-off-by: Jesse Barnes [ickle: tidy up the duplicity in the conditionals] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 35 ++++++++++++++-------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 948a3608d1bd..4b23646304df 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3909,11 +3909,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); } - if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPECONF_DITHER_EN; - pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; - } - /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. 
@@ -3951,23 +3946,27 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. */ - /* set the dithering flag */ - if (IS_I965G(dev)) { - if (dev_priv->lvds_dither) { - if (HAS_PCH_SPLIT(dev)) { - pipeconf |= PIPECONF_DITHER_EN; - pipeconf |= PIPECONF_DITHER_TYPE_ST1; - } else - lvds |= LVDS_ENABLE_DITHER; - } else { - if (!HAS_PCH_SPLIT(dev)) { - lvds &= ~LVDS_ENABLE_DITHER; - } - } + /* set the dithering flag on non-PCH LVDS as needed */ + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (dev_priv->lvds_dither) + lvds |= LVDS_ENABLE_DITHER; + else + lvds &= ~LVDS_ENABLE_DITHER; } I915_WRITE(lvds_reg, lvds); I915_READ(lvds_reg); } + + /* set the dithering flag and clear for anything other than a panel. */ + if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPECONF_DITHER_EN; + pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; + if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { + pipeconf |= PIPECONF_DITHER_EN; + pipeconf |= PIPECONF_DITHER_TYPE_ST1; + } + } + if (is_dp) intel_dp_set_m_n(crtc, mode, adjusted_mode); else if (HAS_PCH_SPLIT(dev)) { From 309b1e3ab750c0ad4d77c6a6e434402e3346baf4 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 18 May 2010 13:53:16 -0400 Subject: [PATCH 080/476] drm/i915: Don't disable panel for modesetting if pfit hasn't changed It seems to be possible to program a new mode without disabling the panel if the panel fitter setup doesn't change. Add support for that. Signed-off-by: Matthew Garrett Acked-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8320279fad58..ef6455104ff1 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -53,6 +53,16 @@ static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); } +static void intel_lvds_lock_panel(struct drm_device *dev, bool lock) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (lock) + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); + else + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); +} + /** * Sets the power state for the panel. 
*/ @@ -349,10 +359,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); dev_priv->backlight_level = intel_panel_get_backlight(dev); - intel_lvds_set_power(dev, false); + if (intel_lvds->pfit_control == I915_READ(PFIT_CONTROL)) + intel_lvds_lock_panel(dev, false); + else + intel_lvds_set_power(dev, false); } static void intel_lvds_commit( struct drm_encoder *encoder) @@ -363,7 +377,10 @@ static void intel_lvds_commit( struct drm_encoder *encoder) if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - intel_lvds_set_power(dev, true); + if ((I915_READ(PP_CONTROL) & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) + intel_lvds_lock_panel(dev, true); + else + intel_lvds_set_power(dev, true); } static void intel_lvds_mode_set(struct drm_encoder *encoder, From 02c026ced58f33bb5702d3063c05dae2b651e4ba Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 24 Aug 2010 19:39:48 +0200 Subject: [PATCH 081/476] agp/intel: split out gmch/gtt probe, part 2 This just splits the device list into two and moves the gtt related stuff to intel-gtt.c. The two new devices lists also lose the not longer needed fields. There where only about 5 cases anyway with both a gmch and a possible agp port, so the duplication of entries is rather small. Additionally kill 2 out of the three Ironlake mobile entries that only differed in host bridge pci id. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-agp.c | 195 +++++------------------------------ drivers/char/agp/intel-gtt.c | 124 ++++++++++++++++++++++ 2 files changed, 147 insertions(+), 172 deletions(-) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index eab58db5f91c..c818a4d50eab 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -703,177 +703,37 @@ static const struct agp_bridge_driver intel_7505_driver = { .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; -static int find_gmch(u16 device) -{ - struct pci_dev *gmch_device; - - gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); - if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { - gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, - device, gmch_device); - } - - if (!gmch_device) - return 0; - - intel_private.pcidev = gmch_device; - return 1; -} - /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of * driver and gmch_driver must be non-null, and find_gmch will determine * which one should be used if a gmch_chip_id is present. 
*/ -static const struct intel_driver_description { +static const struct intel_agp_driver_description { unsigned int chip_id; - unsigned int gmch_chip_id; char *name; const struct agp_bridge_driver *driver; - const struct agp_bridge_driver *gmch_driver; } intel_agp_chipsets[] = { - { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", - NULL, &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", - NULL, &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", - NULL, &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", - &intel_815_driver, &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", - &intel_830mp_driver, &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", - &intel_845_driver, &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", - &intel_845_driver, &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", - &intel_845_driver, &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, - { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", - &intel_845_driver, &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, - { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", - NULL, &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", - NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, - { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, - { PCI_DEVICE_ID_INTEL_G33_HB, 
PCI_DEVICE_ID_INTEL_G33_IG, "G33", - NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", - NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", - NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", - NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", - NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, - "GM45", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, - "Eaglelake", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, - "Q45/Q43", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, - "G45/G43", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, - "B43", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, - "G41", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, - "Sandybridge", NULL, &intel_gen6_driver }, - { 0, 0, NULL, NULL, NULL } + { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver }, + { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver }, + { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver }, + { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver }, + { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver }, + { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver }, + { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver }, + { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver }, + { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver }, + { 
PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver }, + { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver }, + { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver }, + { 0, NULL, NULL } }; -static int __devinit intel_gmch_probe(struct pci_dev *pdev, - struct agp_bridge_data *bridge) -{ - int i, mask; - - bridge->driver = NULL; - - for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { - if ((intel_agp_chipsets[i].gmch_chip_id != 0) && - find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { - bridge->driver = - intel_agp_chipsets[i].gmch_driver; - break; - } - } - - if (!bridge->driver) - return 0; - - bridge->dev_private_data = &intel_private; - bridge->dev = pdev; - - dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); - - if (bridge->driver->mask_memory == intel_gen6_mask_memory) - mask = 40; - else if (bridge->driver->mask_memory == intel_i965_mask_memory) - mask = 36; - else - mask = 32; - - if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) - dev_err(&intel_private.pcidev->dev, - "set gfx device dma mask %d-bit failed!\n", mask); - else - pci_set_consistent_dma_mask(intel_private.pcidev, - DMA_BIT_MASK(mask)); - - return 1; -} - static int __devinit agp_intel_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -903,7 +763,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, } } - if (intel_agp_chipsets[i].name == NULL) { + if (!bridge->driver) { if (cap_ptr) dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n", pdev->vendor, pdev->device); @@ -911,14 +771,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, return -ENODEV; } - if (!bridge->driver) { - if (cap_ptr) - dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", - intel_agp_chipsets[i].gmch_chip_id); - agp_put_bridge(bridge); - return -ENODEV; - } - bridge->dev = pdev; bridge->dev_private_data = NULL; @@ -970,8 +822,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) agp_remove_bridge(bridge); - if (intel_private.pcidev) - pci_dev_put(intel_private.pcidev); + intel_gmch_remove(pdev); agp_put_bridge(bridge); } diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 75e0a3497888..6a89ab8fe605 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1648,3 +1648,127 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_unmap_memory = intel_agp_unmap_memory, #endif }; + +/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of + * driver and gmch_driver must be non-null, and find_gmch will determine + * which one should be used if a gmch_chip_id is present. 
+ */ +static const struct intel_gtt_driver_description { + unsigned int gmch_chip_id; + char *name; + const struct agp_bridge_driver *gmch_driver; +} intel_gtt_chipsets[] = { + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver }, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver }, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver }, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver }, + { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver }, + { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver }, + { PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver }, + { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver }, + { PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver }, + { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, + "HD Graphics", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, + "Sandybridge", &intel_gen6_driver }, + { 0, NULL, NULL } +}; + +static int find_gmch(u16 device) +{ + struct pci_dev *gmch_device; + + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, + device, gmch_device); + } + + if (!gmch_device) + return 0; + + intel_private.pcidev = gmch_device; + return 1; +} + +int __devinit intel_gmch_probe(struct pci_dev *pdev, + struct agp_bridge_data *bridge) +{ + int i, mask; + bridge->driver = NULL; + + for (i = 0; 
intel_gtt_chipsets[i].name != NULL; i++) { + if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { + bridge->driver = + intel_gtt_chipsets[i].gmch_driver; + break; + } + } + + if (!bridge->driver) + return 0; + + bridge->dev_private_data = &intel_private; + bridge->dev = pdev; + + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); + + if (bridge->driver->mask_memory == intel_gen6_mask_memory) + mask = 40; + else if (bridge->driver->mask_memory == intel_i965_mask_memory) + mask = 36; + else + mask = 32; + + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); + + return 1; +} + +void __devexit intel_gmch_remove(struct pci_dev *pdev) +{ + if (intel_private.pcidev) + pci_dev_put(intel_private.pcidev); +} From e2404e7c3fe6f46e161edf085c6d9bea06ebe488 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 8 Sep 2010 17:29:51 +0200 Subject: [PATCH 082/476] agp/intel: make intel-gtt.c into a real source file Now that the disentangling is complete, stop including intel-gtt.c from intel-agp.c. The linux build system _really_ doesn't allow .c source files with the same name as the module. It fails with the following message when trying to build such a bugger: make[3]: Circular drivers/char/agp/intel-agp.o <- drivers/char/agp/intel-agp.o dependency dropped. Instead of renameing intel-agp.c I've simply created a new module out of intel-gtt.c. Renaming intel-agp.ko to something else is not an option for it will surely kill someones boot process. This also paves the way to use the gtt code without loading the agp driver. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/Makefile | 1 + drivers/char/agp/intel-agp.c | 3 --- drivers/char/agp/intel-agp.h | 3 +++ drivers/char/agp/intel-gtt.c | 20 ++++++++++++++++++-- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile index 627f542827c7..8eb56e273e75 100644 --- a/drivers/char/agp/Makefile +++ b/drivers/char/agp/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o obj-$(CONFIG_AGP_PARISC) += parisc-agp.o obj-$(CONFIG_AGP_I460) += i460-agp.o obj-$(CONFIG_AGP_INTEL) += intel-agp.o +obj-$(CONFIG_AGP_INTEL) += intel-gtt.o obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o obj-$(CONFIG_AGP_SIS) += sis-agp.o diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c818a4d50eab..5cd2221ab472 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,9 +12,6 @@ #include #include "agp.h" #include "intel-agp.h" -#include - -#include "intel-gtt.c" int intel_agp_enabled; EXPORT_SYMBOL(intel_agp_enabled); diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index ee189c74d345..bf03afc57cb7 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -253,4 +253,7 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ IS_SNB) +int intel_gmch_probe(struct pci_dev *pdev, + struct agp_bridge_data *bridge); +void intel_gmch_remove(struct pci_dev *pdev); #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 6a89ab8fe605..72f937615056 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -15,6 +15,17 @@ * /fairy-tale-mode off */ +#include +#include +#include +#include +#include +#include 
+#include +#include "agp.h" +#include "intel-agp.h" +#include + /* * If we have Intel graphics, we're not going to have anything other than * an Intel IOMMU. So make the correct use of the PCI DMA API contingent @@ -1728,7 +1739,7 @@ static int find_gmch(u16 device) return 1; } -int __devinit intel_gmch_probe(struct pci_dev *pdev, +int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge) { int i, mask; @@ -1766,9 +1777,14 @@ int __devinit intel_gmch_probe(struct pci_dev *pdev, return 1; } +EXPORT_SYMBOL(intel_gmch_probe); -void __devexit intel_gmch_remove(struct pci_dev *pdev) +void intel_gmch_remove(struct pci_dev *pdev) { if (intel_private.pcidev) pci_dev_put(intel_private.pcidev); } +EXPORT_SYMBOL(intel_gmch_remove); + +MODULE_AUTHOR("Dave Jones "); +MODULE_LICENSE("GPL and additional rights"); From 0ade638655f0ef4d807295c14a4c97544bd6b9ca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 24 Aug 2010 22:18:41 +0200 Subject: [PATCH 083/476] intel-gtt: introduce drm/intel-gtt.h Add a few definitions to it that are already shared and that will be shared in the future (like the number of stolen entries). No functional changes in here. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 42 +++++++++++++++------------------ drivers/gpu/drm/i915/i915_dma.c | 2 -- drivers/gpu/drm/i915/i915_drv.h | 1 + include/drm/intel-gtt.h | 18 ++++++++++++++ 4 files changed, 38 insertions(+), 25 deletions(-) create mode 100644 include/drm/intel-gtt.h diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 72f937615056..0a3e91ba0f2b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,6 +25,7 @@ #include "agp.h" #include "intel-agp.h" #include +#include /* * If we have Intel graphics, we're not going to have anything other than @@ -81,17 +82,11 @@ static struct gatt_mask intel_gen6_masks[] = }; static struct _intel_private { + struct intel_gtt base; struct pci_dev *pcidev; /* device one */ u8 __iomem *registers; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; - /* gtt_entries is the number of gtt entries that are already mapped - * to stolen memory. Stolen memory is larger than the memory mapped - * through gtt_entries, as it includes some reserved space for the BIOS - * popup and for the GTT. - */ - int gtt_entries; /* i830+ */ - int gtt_total_size; union { void __iomem *i9xx_flush_page; void *i8xx_flush_page; @@ -772,7 +767,7 @@ static void intel_i830_init_gtt_entries(void) gtt_entries = 0; } - intel_private.gtt_entries = gtt_entries; + intel_private.base.gtt_stolen_entries = gtt_entries; } static void intel_i830_fini_flush(void) @@ -849,7 +844,7 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); - if (intel_private.gtt_entries == 0) { + if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.registers); return -ENOMEM; } @@ -919,7 +914,7 @@ static int intel_i830_configure(void) readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ if (agp_bridge->driver->needs_scratch_page) { - for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { + for (i = intel_private.base.gtt_stolen_entries; i < current_size->num_entries; i++) { writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); } readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. 
*/ @@ -950,10 +945,10 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; - if (pg_start < intel_private.gtt_entries) { + if (pg_start < intel_private.base.gtt_stolen_entries) { dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", - pg_start, intel_private.gtt_entries); + "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", + pg_start, intel_private.base.gtt_stolen_entries); dev_info(&intel_private.pcidev->dev, "trying to insert into local/stolen memory\n"); @@ -1001,7 +996,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, if (mem->page_count == 0) return 0; - if (pg_start < intel_private.gtt_entries) { + if (pg_start < intel_private.base.gtt_stolen_entries) { dev_info(&intel_private.pcidev->dev, "trying to disable local/stolen memory\n"); return -EINVAL; @@ -1136,7 +1131,8 @@ static int intel_i9xx_configure(void) readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ if (agp_bridge->driver->needs_scratch_page) { - for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { + for (i = intel_private.base.gtt_stolen_entries; i < + intel_private.base.gtt_total_entries; i++) { writel(agp_bridge->scratch_page, intel_private.gtt+i); } readl(intel_private.gtt+i-1); /* PCI Posting. */ @@ -1181,10 +1177,10 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; - if (pg_start < intel_private.gtt_entries) { + if (pg_start < intel_private.base.gtt_stolen_entries) { dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", - pg_start, intel_private.gtt_entries); + "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", + pg_start, intel_private.base.gtt_stolen_entries); dev_info(&intel_private.pcidev->dev, "trying to insert into local/stolen memory\n"); @@ -1227,7 +1223,7 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, if (mem->page_count == 0) return 0; - if (pg_start < intel_private.gtt_entries) { + if (pg_start < intel_private.base.gtt_stolen_entries) { dev_info(&intel_private.pcidev->dev, "trying to disable local/stolen memory\n"); return -EINVAL; @@ -1323,7 +1319,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) if (!intel_private.gtt) return -ENOMEM; - intel_private.gtt_total_size = gtt_map_size / 4; + intel_private.base.gtt_total_entries = gtt_map_size / 4; temp &= 0xfff80000; @@ -1338,7 +1334,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); - if (intel_private.gtt_entries == 0) { + if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.gtt); iounmap(intel_private.registers); return -ENOMEM; @@ -1449,7 +1445,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) if (!intel_private.gtt) return -ENOMEM; - intel_private.gtt_total_size = gtt_size / 4; + intel_private.base.gtt_total_entries = gtt_size / 4; intel_private.registers = ioremap(temp, 128 * 4096); if (!intel_private.registers) { @@ -1462,7 +1458,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ 
intel_i830_init_gtt_entries(); - if (intel_private.gtt_entries == 0) { + if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.gtt); iounmap(intel_private.registers); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8d52f01a6d90..47228cb16901 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -42,8 +42,6 @@ #include #include -extern int intel_max_stolen; /* from AGP driver */ - /** * Sets up the hardware status page for devices that need a physical address * in the register. diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cfc8bfd0fd7e..d825ef207b2c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,6 +34,7 @@ #include "intel_bios.h" #include "intel_ringbuffer.h" #include +#include /* General customization: */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h new file mode 100644 index 000000000000..6769cb704e9b --- /dev/null +++ b/include/drm/intel-gtt.h @@ -0,0 +1,18 @@ +/* Common header for intel-gtt.ko and i915.ko */ + +#ifndef _DRM_INTEL_GTT_H +#define _DRM_INTEL_GTT_H +extern int intel_max_stolen; /* from AGP driver */ + +struct intel_gtt { + /* Number of stolen gtt entries at the beginning. */ + unsigned int gtt_stolen_entries; + /* Total number of gtt entries. */ + unsigned int gtt_total_entries; + /* Part of the gtt that is mappable by the cpu, for those chips where + * this is not the full gtt. */ + unsigned int gtt_mappable_entries; +}; + +#endif + From d7cca2f7000243ac43a389110c3d8474f582ae3f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 24 Aug 2010 23:06:19 +0200 Subject: [PATCH 084/476] intel-gtt: store a local pointer to the bridge pci dev When the intel-gtt code now longer depends on agp, we cannot rely on this. So store a local reference in intel-gtt.c. 
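The conversion itself is mechanical (agp_bridge->dev becomes intel_private.bridge_dev throughout the GTT paths); the only new bookkeeping is that the probe and remove paths have to take and drop a reference on the bridge device. A rough sketch of that bookkeeping, under the assumption that the probe hook receives the host bridge's pci_dev (the example_* names are illustrative and the hunk doing this is not shown here):

/* Sketch only: pin the host bridge at probe time ... */
static int example_record_bridge(struct pci_dev *bridge_pdev)
{
	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
	return intel_private.bridge_dev ? 0 : -ENODEV;
}

/* ... and drop the reference again on removal. */
static void example_forget_bridge(void)
{
	pci_dev_put(intel_private.bridge_dev);
	intel_private.bridge_dev = NULL;
}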
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 61 ++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 0a3e91ba0f2b..96e5fd1aa554 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -84,6 +84,7 @@ static struct gatt_mask intel_gen6_masks[] = static struct _intel_private { struct intel_gtt base; struct pci_dev *pcidev; /* device one */ + struct pci_dev *bridge_dev; u8 __iomem *registers; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; @@ -221,11 +222,12 @@ static int intel_i810_fetch_size(void) u32 smram_miscc; struct aper_size_info_fixed *values; - pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); + pci_read_config_dword(intel_private.bridge_dev, + I810_SMRAM_MISCC, &smram_miscc); values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { - dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); + dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n"); return 0; } if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { @@ -538,7 +540,8 @@ static void intel_i830_init_gtt_entries(void) static const int ddt[4] = { 0, 16, 32, 64 }; int size; /* reserved space (in kb) at the top of stolen memory */ - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); if (IS_I965) { u32 pgetbl_ctl; @@ -583,7 +586,7 @@ static void intel_i830_init_gtt_entries(void) size = 2048; break; default: - dev_info(&agp_bridge->dev->dev, + dev_info(&intel_private.bridge_dev->dev, "unknown page table size 0x%x, assuming 512KB\n", (gmch_ctrl & G33_PGETBL_SIZE_MASK)); size = 512; @@ -602,8 +605,8 @@ static void intel_i830_init_gtt_entries(void) size = agp_bridge->driver->fetch_size() + 4; } - if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { + if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || + intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: gtt_entries = KB(512) - KB(size); @@ -753,16 +756,16 @@ static void intel_i830_init_gtt_entries(void) } } if (!local && gtt_entries > intel_max_stolen) { - dev_info(&agp_bridge->dev->dev, + dev_info(&intel_private.bridge_dev->dev, "detected %dK stolen memory, trimming to %dK\n", gtt_entries / KB(1), intel_max_stolen / KB(1)); gtt_entries = intel_max_stolen / KB(4); } else if (gtt_entries > 0) { - dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", + dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", gtt_entries / KB(1), local ? 
"local" : "stolen"); gtt_entries /= KB(4); } else { - dev_info(&agp_bridge->dev->dev, + dev_info(&intel_private.bridge_dev->dev, "no pre-allocated video memory detected\n"); gtt_entries = 0; } @@ -871,15 +874,15 @@ static int intel_i830_fetch_size(void) values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); - if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && - agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { + if (intel_private.bridge_dev->device != PCI_DEVICE_ID_INTEL_82830_HB && + intel_private.bridge_dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { /* 855GM/852GM/865G has 128MB aperture size */ agp_bridge->current_size = (void *) values; agp_bridge->aperture_size_idx = 0; return values[0].size; } - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { agp_bridge->current_size = (void *) values; @@ -906,9 +909,9 @@ static int intel_i830_configure(void) pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); + pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ @@ -1021,9 +1024,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) static int intel_alloc_chipset_flush_resource(void) { int ret; - ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, + ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE, PAGE_SIZE, PCIBIOS_MIN_MEM, 0, - pcibios_align_resource, agp_bridge->dev); + pcibios_align_resource, intel_private.bridge_dev); return ret; } @@ -1033,11 +1036,11 @@ static void intel_i915_setup_chipset_flush(void) int ret; u32 temp; - pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); + pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); if (!(temp & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; - pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { temp &= ~1; @@ -1056,17 +1059,17 @@ static void intel_i965_g33_setup_chipset_flush(void) u32 temp_hi, temp_lo; int ret; - pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); - pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); + pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); + pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); if (!(temp_lo & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; - pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, + pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, upper_32_bits(intel_private.ifp_resource.start)); - pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + 
pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { u64 l64; @@ -1123,9 +1126,9 @@ static int intel_i9xx_configure(void) agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); + pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ @@ -1267,7 +1270,7 @@ static int intel_i915_get_gtt_size(void) u16 gmch_ctrl; /* G33's GTT size defined in gmch_ctrl */ - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: size = 512; @@ -1279,7 +1282,7 @@ static int intel_i915_get_gtt_size(void) size = 8*1024; break; default: - dev_info(&agp_bridge->dev->dev, + dev_info(&intel_private.bridge_dev->dev, "unknown page table size 0x%x, assuming 512KB\n", (gmch_ctrl & I830_GMCH_GMS_MASK)); size = 512; @@ -1380,7 +1383,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) { u16 snb_gmch_ctl; - switch (agp_bridge->dev->device) { + switch (intel_private.bridge_dev->device) { case PCI_DEVICE_ID_INTEL_GM45_HB: case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: case PCI_DEVICE_ID_INTEL_Q45_HB: @@ -1755,6 +1758,8 @@ int intel_gmch_probe(struct pci_dev *pdev, bridge->dev_private_data = &intel_private; bridge->dev = pdev; + intel_private.bridge_dev = pci_dev_get(pdev); + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); if (bridge->driver->mask_memory == intel_gen6_mask_memory) @@ -1779,6 +1784,8 @@ void intel_gmch_remove(struct pci_dev *pdev) { if (intel_private.pcidev) pci_dev_put(intel_private.pcidev); + if (intel_private.bridge_dev) + pci_dev_put(intel_private.bridge_dev); } EXPORT_SYMBOL(intel_gmch_remove); From bfde067bebe72293b1f909a8b35ee8d82811f8f5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 24 Aug 2010 23:07:59 +0200 Subject: [PATCH 085/476] intel-gtt: s/intel_i830_init_gtt_entries/intel_gtt_stolen_entries First simple step towards a more generic initialization. This is needed to disentangle the agp stuff from the stuff that is actually needed by drm/i915. 
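Reduced to its essence, the rename also changes the calling convention: the helper returns the stolen-entry count and the gatt-table constructors store it. An illustrative sketch with the decode body and error paths trimmed:

/* after: the helper returns the count instead of writing intel_private */
static unsigned int sketch_gtt_stolen_entries(void)
{
        unsigned int entries = 0;

        /* ... decode gmch_ctrl exactly as before ... */
        return entries;
}

static int sketch_create_gatt_table(void)
{
        /* the caller now decides where the value ends up */
        intel_private.base.gtt_stolen_entries = sketch_gtt_stolen_entries();
        if (intel_private.base.gtt_stolen_entries == 0)
                return -ENOMEM;

        return 0;
}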
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 96e5fd1aa554..9edeb060efb2 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -531,10 +531,10 @@ static struct aper_size_info_fixed intel_i830_sizes[] = {512, 131072, 7}, }; -static void intel_i830_init_gtt_entries(void) +static unsigned int intel_gtt_stolen_entries(void) { u16 gmch_ctrl; - int gtt_entries = 0; + unsigned int gtt_entries = 0; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; @@ -770,7 +770,7 @@ static void intel_i830_init_gtt_entries(void) gtt_entries = 0; } - intel_private.base.gtt_stolen_entries = gtt_entries; + return gtt_entries; } static void intel_i830_fini_flush(void) @@ -846,7 +846,7 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) global_cache_flush(); /* FIXME: ?? */ /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); + intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.registers); return -ENOMEM; @@ -1336,7 +1336,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) global_cache_flush(); /* FIXME: ? */ /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); + intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.gtt); iounmap(intel_private.registers); @@ -1460,7 +1460,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) global_cache_flush(); /* FIXME: ? */ /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); + intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.gtt); iounmap(intel_private.registers); From 1784a5fb4f7a41b9a5ea066f7782418bfe170c04 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 8 Sep 2010 21:01:04 +0200 Subject: [PATCH 086/476] intel-gtt: new function intel_gtt_mappable_entries This implementation is stolen from drm/i915, but is equivalent to the code sprinkled over intel-gtt.c in the various fetch_size functions. It's not yet used anywhere, though. Also introduce intel_gtt_init which only calls intel_gtt_stolen_entries. Over the course of the next patches, this will grow untill it contains the complete init sequence starting from the call to gtt_mappable_entries. 
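Condensed, the new helper computes the aperture size in bytes and converts it to 4 KiB GTT entries. A sketch with the private struct replaced by explicit parameters, otherwise the same logic as the diff below:

static unsigned int sketch_gtt_mappable_entries(struct pci_dev *gpu,
                                                struct pci_dev *bridge)
{
        unsigned int aperture_size = 1024 * 1024;
        u16 gmch_ctrl;

        pci_read_config_word(bridge, I830_GMCH_CTRL, &gmch_ctrl);

        switch (gpu->device) {
        case PCI_DEVICE_ID_INTEL_82830_CGC:
        case PCI_DEVICE_ID_INTEL_82845G_IG:
        case PCI_DEVICE_ID_INTEL_82855GM_IG:
        case PCI_DEVICE_ID_INTEL_82865_IG:
                /* gen2: 64M or 128M aperture, selected by the GMCH control */
                if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
                        aperture_size *= 64;
                else
                        aperture_size *= 128;
                break;
        default:
                /* 9xx and later: BAR 2 length is the aperture size */
                aperture_size = pci_resource_len(gpu, 2);
                break;
        }

        /* one GTT entry per 4 KiB page */
        return aperture_size >> PAGE_SHIFT;
}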
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 78 +++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 19 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9edeb060efb2..932ede81e726 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -755,6 +755,7 @@ static unsigned int intel_gtt_stolen_entries(void) break; } } + if (!local && gtt_entries > intel_max_stolen) { dev_info(&intel_private.bridge_dev->dev, "detected %dK stolen memory, trimming to %dK\n", @@ -773,6 +774,47 @@ static unsigned int intel_gtt_stolen_entries(void) return gtt_entries; } +static unsigned int intel_gtt_mappable_entries(void) +{ + unsigned int aperture_size; + u16 gmch_ctrl; + + aperture_size = 1024 * 1024; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + + switch (intel_private.pcidev->device) { + case PCI_DEVICE_ID_INTEL_82830_CGC: + case PCI_DEVICE_ID_INTEL_82845G_IG: + case PCI_DEVICE_ID_INTEL_82855GM_IG: + case PCI_DEVICE_ID_INTEL_82865_IG: + if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) + aperture_size *= 64; + else + aperture_size *= 128; + break; + default: + /* 9xx supports large sizes, just look at the length */ + aperture_size = pci_resource_len(intel_private.pcidev, 2); + break; + } + + return aperture_size >> PAGE_SHIFT; +} + +static int intel_gtt_init(void) +{ + /* we have to call this as early as possible after the MMIO base address is known */ + intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); + if (intel_private.base.gtt_stolen_entries == 0) { + iounmap(intel_private.registers); + return -ENOMEM; + } + + return 0; +} + static void intel_i830_fini_flush(void) { kunmap(intel_private.i8xx_page); @@ -825,7 +867,7 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) */ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) { - int page_order; + int page_order, ret; struct aper_size_info_fixed *size; int num_entries; u32 temp; @@ -845,12 +887,9 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; global_cache_flush(); /* FIXME: ?? */ - /* we have to call this as early as possible after the MMIO base address is known */ - intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); - if (intel_private.base.gtt_stolen_entries == 0) { - iounmap(intel_private.registers); - return -ENOMEM; - } + ret = intel_gtt_init(); + if (ret != 0) + return ret; agp_bridge->gatt_table = NULL; @@ -1302,7 +1341,7 @@ static int intel_i915_get_gtt_size(void) */ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) { - int page_order; + int page_order, ret; struct aper_size_info_fixed *size; int num_entries; u32 temp, temp2; @@ -1335,12 +1374,10 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; global_cache_flush(); /* FIXME: ? 
*/ - /* we have to call this as early as possible after the MMIO base address is known */ - intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); - if (intel_private.base.gtt_stolen_entries == 0) { + ret = intel_gtt_init(); + if (ret != 0) { iounmap(intel_private.gtt); - iounmap(intel_private.registers); - return -ENOMEM; + return ret; } agp_bridge->gatt_table = NULL; @@ -1426,7 +1463,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) */ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) { - int page_order; + int page_order, ret; struct aper_size_info_fixed *size; int num_entries; u32 temp; @@ -1459,12 +1496,10 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; global_cache_flush(); /* FIXME: ? */ - /* we have to call this as early as possible after the MMIO base address is known */ - intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); - if (intel_private.base.gtt_stolen_entries == 0) { + ret = intel_gtt_init(); + if (ret != 0) { iounmap(intel_private.gtt); - iounmap(intel_private.registers); - return -ENOMEM; + return ret; } agp_bridge->gatt_table = NULL; @@ -1776,6 +1811,11 @@ int intel_gmch_probe(struct pci_dev *pdev, pci_set_consistent_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)); + if (bridge->driver == &intel_810_driver) + return 1; + + intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); + return 1; } EXPORT_SYMBOL(intel_gmch_probe); From 3e921f980fdd5b972efb7f368b2a847a01804184 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Aug 2010 15:33:26 +0200 Subject: [PATCH 087/476] intel-gtt: generic intel_fake_agp_fetch_size This uses the new mappable gtt size detection from the previous patch. 
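As a quick check of the conversion, the arithmetic for the first entry of the fixed aperture table works out as follows:

/*
 * Worked example (first entry of the fixed table, {128, 32768, 5}):
 *
 *   gtt_mappable_entries = 128 MB >> PAGE_SHIFT          = 32768 pages
 *   aper_size            = (32768 << PAGE_SHIFT) / MB(1) = 128
 *
 * 128 matches intel_i830_sizes[0].size, so the generic fetch_size()
 * reports a 128 MB aperture, exactly as the per-chipset versions did.
 */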
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 77 +++++++++++------------------------- 1 file changed, 24 insertions(+), 53 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 932ede81e726..a997a202832e 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -815,6 +815,25 @@ static int intel_gtt_init(void) return 0; } +static int intel_fake_agp_fetch_size(void) +{ + unsigned int aper_size; + int i; + int num_sizes = ARRAY_SIZE(intel_i830_sizes); + + aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) + / MB(1); + + for (i = 0; i < num_sizes; i++) { + if (aper_size == intel_i830_sizes[i].size) { + agp_bridge->current_size = intel_i830_sizes + i; + return aper_size; + } + } + + return 0; +} + static void intel_i830_fini_flush(void) { kunmap(intel_private.i8xx_page); @@ -906,36 +925,6 @@ static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) return 0; } -static int intel_i830_fetch_size(void) -{ - u16 gmch_ctrl; - struct aper_size_info_fixed *values; - - values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); - - if (intel_private.bridge_dev->device != PCI_DEVICE_ID_INTEL_82830_HB && - intel_private.bridge_dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { - /* 855GM/852GM/865G has 128MB aperture size */ - agp_bridge->current_size = (void *) values; - agp_bridge->aperture_size_idx = 0; - return values[0].size; - } - - pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - - if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { - agp_bridge->current_size = (void *) values; - agp_bridge->aperture_size_idx = 0; - return values[0].size; - } else { - agp_bridge->current_size = (void *) (values + 1); - agp_bridge->aperture_size_idx = 1; - return values[1].size; - } - - return 0; -} - static int intel_i830_configure(void) { struct aper_size_info_fixed *current_size; @@ -1283,24 +1272,6 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, * described in the spec of the MSAC registers is just changing of the * resource size. 
*/ -static int intel_i9xx_fetch_size(void) -{ - int num_sizes = ARRAY_SIZE(intel_i830_sizes); - int aper_size; /* size in megabytes */ - int i; - - aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); - - for (i = 0; i < num_sizes; i++) { - if (aper_size == intel_i830_sizes[i].size) { - agp_bridge->current_size = intel_i830_sizes + i; - return aper_size; - } - } - - return 0; -} - static int intel_i915_get_gtt_size(void) { int size; @@ -1542,7 +1513,7 @@ static const struct agp_bridge_driver intel_830_driver = { .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_i830_configure, - .fetch_size = intel_i830_fetch_size, + .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_i830_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, @@ -1569,7 +1540,7 @@ static const struct agp_bridge_driver intel_915_driver = { .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, + .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_i915_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, @@ -1602,7 +1573,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, + .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_i915_cleanup, .mask_memory = intel_i965_mask_memory, .masks = intel_i810_masks, @@ -1635,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, + .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_i915_cleanup, .mask_memory = intel_gen6_mask_memory, .masks = intel_gen6_masks, @@ -1668,7 +1639,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, + .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_i915_cleanup, .mask_memory = intel_i965_mask_memory, .masks = intel_i810_masks, From d8d9abcd35aeebd633cba2e99c384f4e004ccb84 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Aug 2010 16:13:52 +0200 Subject: [PATCH 088/476] intel-gtt: sane variable names for intel_gtt_stolen_entries This somewhat aligns it with the version in drm/i915/i915_dma.c. Changes: - s/gtt_entries/stolen_size - track overhead entries in a seperate var (the effective gtt size calculation will be extracted later on). - subtract the overhead at the end instead of in each clause. 
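To see that the reshuffled accounting is equivalent to the old per-case subtraction, take an i965-style 256 kB page table plus the 4 kB BIOS popup (size = 260) and 64 MB of stolen memory:

/*
 * old style, subtracting inside each case:
 *   gtt_entries = (MB(64) - KB(260)) / KB(4)            = 16384 - 65 = 16319
 *
 * new style, subtracting once at the end:
 *   stolen_size      = MB(64)                           -> 16384 entries
 *   overhead_entries = KB(260) / KB(4)                  =     65 entries
 *   stolen_entries   = stolen_size/KB(4) - overhead_entries     = 16319
 */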
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 108 ++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 52 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index a997a202832e..79eb106c6f08 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -534,11 +534,12 @@ static struct aper_size_info_fixed intel_i830_sizes[] = static unsigned int intel_gtt_stolen_entries(void) { u16 gmch_ctrl; - unsigned int gtt_entries = 0; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; int size; /* reserved space (in kb) at the top of stolen memory */ + unsigned int overhead_entries, stolen_entries; + unsigned int stolen_size = 0; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); @@ -605,26 +606,28 @@ static unsigned int intel_gtt_stolen_entries(void) size = agp_bridge->driver->fetch_size() + 4; } + overhead_entries = size/4; + if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: - gtt_entries = KB(512) - KB(size); + stolen_size = KB(512); break; case I830_GMCH_GMS_STOLEN_1024: - gtt_entries = MB(1) - KB(size); + stolen_size = MB(1); break; case I830_GMCH_GMS_STOLEN_8192: - gtt_entries = MB(8) - KB(size); + stolen_size = MB(8); break; case I830_GMCH_GMS_LOCAL: rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); - gtt_entries = (I830_RDRAM_ND(rdct) + 1) * + stolen_size = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]); local = 1; break; default: - gtt_entries = 0; + stolen_size = 0; break; } } else if (IS_SNB) { @@ -635,143 +638,144 @@ static unsigned int intel_gtt_stolen_entries(void) pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { case SNB_GMCH_GMS_STOLEN_32M: - gtt_entries = MB(32) - KB(size); + stolen_size = MB(32); break; case SNB_GMCH_GMS_STOLEN_64M: - gtt_entries = MB(64) - KB(size); + stolen_size = MB(64); break; case SNB_GMCH_GMS_STOLEN_96M: - gtt_entries = MB(96) - KB(size); + stolen_size = MB(96); break; case SNB_GMCH_GMS_STOLEN_128M: - gtt_entries = MB(128) - KB(size); + stolen_size = MB(128); break; case SNB_GMCH_GMS_STOLEN_160M: - gtt_entries = MB(160) - KB(size); + stolen_size = MB(160); break; case SNB_GMCH_GMS_STOLEN_192M: - gtt_entries = MB(192) - KB(size); + stolen_size = MB(192); break; case SNB_GMCH_GMS_STOLEN_224M: - gtt_entries = MB(224) - KB(size); + stolen_size = MB(224); break; case SNB_GMCH_GMS_STOLEN_256M: - gtt_entries = MB(256) - KB(size); + stolen_size = MB(256); break; case SNB_GMCH_GMS_STOLEN_288M: - gtt_entries = MB(288) - KB(size); + stolen_size = MB(288); break; case SNB_GMCH_GMS_STOLEN_320M: - gtt_entries = MB(320) - KB(size); + stolen_size = MB(320); break; case SNB_GMCH_GMS_STOLEN_352M: - gtt_entries = MB(352) - KB(size); + stolen_size = MB(352); break; case SNB_GMCH_GMS_STOLEN_384M: - gtt_entries = MB(384) - KB(size); + stolen_size = MB(384); break; case SNB_GMCH_GMS_STOLEN_416M: - gtt_entries = MB(416) - KB(size); + stolen_size = MB(416); break; case SNB_GMCH_GMS_STOLEN_448M: - gtt_entries = MB(448) - KB(size); + stolen_size = MB(448); break; case SNB_GMCH_GMS_STOLEN_480M: - gtt_entries = MB(480) - KB(size); + stolen_size = MB(480); break; case SNB_GMCH_GMS_STOLEN_512M: - gtt_entries = MB(512) - KB(size); + stolen_size = MB(512); break; } } else { 
switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: - gtt_entries = MB(1) - KB(size); + stolen_size = MB(1); break; case I855_GMCH_GMS_STOLEN_4M: - gtt_entries = MB(4) - KB(size); + stolen_size = MB(4); break; case I855_GMCH_GMS_STOLEN_8M: - gtt_entries = MB(8) - KB(size); + stolen_size = MB(8); break; case I855_GMCH_GMS_STOLEN_16M: - gtt_entries = MB(16) - KB(size); + stolen_size = MB(16); break; case I855_GMCH_GMS_STOLEN_32M: - gtt_entries = MB(32) - KB(size); + stolen_size = MB(32); break; case I915_GMCH_GMS_STOLEN_48M: /* Check it's really I915G */ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) - gtt_entries = MB(48) - KB(size); + stolen_size = MB(48); else - gtt_entries = 0; + stolen_size = 0; break; case I915_GMCH_GMS_STOLEN_64M: /* Check it's really I915G */ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) - gtt_entries = MB(64) - KB(size); + stolen_size = MB(64); else - gtt_entries = 0; + stolen_size = 0; break; case G33_GMCH_GMS_STOLEN_128M: if (IS_G33 || IS_I965 || IS_G4X) - gtt_entries = MB(128) - KB(size); + stolen_size = MB(128); else - gtt_entries = 0; + stolen_size = 0; break; case G33_GMCH_GMS_STOLEN_256M: if (IS_G33 || IS_I965 || IS_G4X) - gtt_entries = MB(256) - KB(size); + stolen_size = MB(256); else - gtt_entries = 0; + stolen_size = 0; break; case INTEL_GMCH_GMS_STOLEN_96M: if (IS_I965 || IS_G4X) - gtt_entries = MB(96) - KB(size); + stolen_size = MB(96); else - gtt_entries = 0; + stolen_size = 0; break; case INTEL_GMCH_GMS_STOLEN_160M: if (IS_I965 || IS_G4X) - gtt_entries = MB(160) - KB(size); + stolen_size = MB(160); else - gtt_entries = 0; + stolen_size = 0; break; case INTEL_GMCH_GMS_STOLEN_224M: if (IS_I965 || IS_G4X) - gtt_entries = MB(224) - KB(size); + stolen_size = MB(224); else - gtt_entries = 0; + stolen_size = 0; break; case INTEL_GMCH_GMS_STOLEN_352M: if (IS_I965 || IS_G4X) - gtt_entries = MB(352) - KB(size); + stolen_size = MB(352); else - gtt_entries = 0; + stolen_size = 0; break; default: - gtt_entries = 0; + stolen_size = 0; break; } } - if (!local && gtt_entries > intel_max_stolen) { + if (!local && stolen_size > intel_max_stolen) { dev_info(&intel_private.bridge_dev->dev, "detected %dK stolen memory, trimming to %dK\n", - gtt_entries / KB(1), intel_max_stolen / KB(1)); - gtt_entries = intel_max_stolen / KB(4); - } else if (gtt_entries > 0) { + stolen_size / KB(1), intel_max_stolen / KB(1)); + stolen_size = intel_max_stolen; + } else if (stolen_size > 0) { dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", - gtt_entries / KB(1), local ? "local" : "stolen"); - gtt_entries /= KB(4); + stolen_size / KB(1), local ? "local" : "stolen"); } else { dev_info(&intel_private.bridge_dev->dev, "no pre-allocated video memory detected\n"); - gtt_entries = 0; + stolen_size = 0; } - return gtt_entries; + stolen_entries = stolen_size/KB(4) - overhead_entries; + + return stolen_entries; } static unsigned int intel_gtt_mappable_entries(void) From 77ad498ecaeb9a614d2a7bbfaab58a35c0cc577d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Aug 2010 16:25:54 +0200 Subject: [PATCH 089/476] intel-gtt: drop unnecessary conditions in intel_gtt_stolen_entries The dedection function in drm/i915/i915_dma.c works without it, so drop it here, too. All the values are disdinct, anyway. 
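With the guards gone the decode is a straight mapping from GMS field to stolen size; a condensed sketch with a few representative cases (not the full switch):

static unsigned int sketch_decode_gms(u16 gmch_ctrl)
{
        /* each encoding is only ever produced by hardware that supports it,
         * so no per-chipset checks are needed */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
                return MB(1);
        case I915_GMCH_GMS_STOLEN_48M:
                return MB(48);
        case G33_GMCH_GMS_STOLEN_128M:
                return MB(128);
        case INTEL_GMCH_GMS_STOLEN_352M:
                return MB(352);
        default:
                return 0;
        }
}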
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 42 +++++++----------------------------- 1 file changed, 8 insertions(+), 34 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 79eb106c6f08..a620296c0810 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -704,54 +704,28 @@ static unsigned int intel_gtt_stolen_entries(void) stolen_size = MB(32); break; case I915_GMCH_GMS_STOLEN_48M: - /* Check it's really I915G */ - if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) - stolen_size = MB(48); - else - stolen_size = 0; + stolen_size = MB(48); break; case I915_GMCH_GMS_STOLEN_64M: - /* Check it's really I915G */ - if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) - stolen_size = MB(64); - else - stolen_size = 0; + stolen_size = MB(64); break; case G33_GMCH_GMS_STOLEN_128M: - if (IS_G33 || IS_I965 || IS_G4X) - stolen_size = MB(128); - else - stolen_size = 0; + stolen_size = MB(128); break; case G33_GMCH_GMS_STOLEN_256M: - if (IS_G33 || IS_I965 || IS_G4X) - stolen_size = MB(256); - else - stolen_size = 0; + stolen_size = MB(256); break; case INTEL_GMCH_GMS_STOLEN_96M: - if (IS_I965 || IS_G4X) - stolen_size = MB(96); - else - stolen_size = 0; + stolen_size = MB(96); break; case INTEL_GMCH_GMS_STOLEN_160M: - if (IS_I965 || IS_G4X) - stolen_size = MB(160); - else - stolen_size = 0; + stolen_size = MB(160); break; case INTEL_GMCH_GMS_STOLEN_224M: - if (IS_I965 || IS_G4X) - stolen_size = MB(224); - else - stolen_size = 0; + stolen_size = MB(224); break; case INTEL_GMCH_GMS_STOLEN_352M: - if (IS_I965 || IS_G4X) - stolen_size = MB(352); - else - stolen_size = 0; + stolen_size = MB(352); break; default: stolen_size = 0; From fbe407836b5c8d82c68195962240a392d0ce64ea Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Aug 2010 17:12:41 +0200 Subject: [PATCH 090/476] intel-gtt: adjust overhead entries in intel_gtt_stolen_entries agp/intel_gtt.c and drm/i915/i915_dma.c don't calculate this the same way: The intel-gtt code seems to use the actual gtt size, the drm module just the mappable. Go with the logic from the drm module because that's the more conservative choice. But conserve the original code in intel_gtt_total_size for later use. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 141 +++++++++++++++++++---------------- 1 file changed, 78 insertions(+), 63 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index a620296c0810..04e052e3f3da 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -537,76 +537,19 @@ static unsigned int intel_gtt_stolen_entries(void) u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; - int size; /* reserved space (in kb) at the top of stolen memory */ unsigned int overhead_entries, stolen_entries; unsigned int stolen_size = 0; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - if (IS_I965) { - u32 pgetbl_ctl; - pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + if (IS_G4X || IS_PINEVIEW) + overhead_entries = 0; + else + overhead_entries = intel_private.base.gtt_mappable_entries + / 1024; - /* The 965 has a field telling us the size of the GTT, - * which may be larger than what is necessary to map the - * aperture. 
- */ - switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { - case I965_PGETBL_SIZE_128KB: - size = 128; - break; - case I965_PGETBL_SIZE_256KB: - size = 256; - break; - case I965_PGETBL_SIZE_512KB: - size = 512; - break; - case I965_PGETBL_SIZE_1MB: - size = 1024; - break; - case I965_PGETBL_SIZE_2MB: - size = 2048; - break; - case I965_PGETBL_SIZE_1_5MB: - size = 1024 + 512; - break; - default: - dev_info(&intel_private.pcidev->dev, - "unknown page table size, assuming 512KB\n"); - size = 512; - } - size += 4; /* add in BIOS popup space */ - } else if (IS_G33 && !IS_PINEVIEW) { - /* G33's GTT size defined in gmch_ctrl */ - switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { - case G33_PGETBL_SIZE_1M: - size = 1024; - break; - case G33_PGETBL_SIZE_2M: - size = 2048; - break; - default: - dev_info(&intel_private.bridge_dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & G33_PGETBL_SIZE_MASK)); - size = 512; - } - size += 4; - } else if (IS_G4X || IS_PINEVIEW) { - /* On 4 series hardware, GTT stolen is separate from graphics - * stolen, ignore it in stolen gtt entries counting. However, - * 4KB of the stolen memory doesn't get mapped to the GTT. - */ - size = 4; - } else { - /* On previous hardware, the GTT size was just what was - * required to map the aperture. - */ - size = agp_bridge->driver->fetch_size() + 4; - } - - overhead_entries = size/4; + overhead_entries += 1; /* BIOS popup */ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { @@ -752,6 +695,78 @@ static unsigned int intel_gtt_stolen_entries(void) return stolen_entries; } +#if 0 /* extracted code in bad shape, needs some cleaning before use */ +static unsigned int intel_gtt_total_entries(void) +{ + int size; + u16 gmch_ctrl; + + if (IS_I965) { + u32 pgetbl_ctl; + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + + /* The 965 has a field telling us the size of the GTT, + * which may be larger than what is necessary to map the + * aperture. + */ + switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { + case I965_PGETBL_SIZE_128KB: + size = 128; + break; + case I965_PGETBL_SIZE_256KB: + size = 256; + break; + case I965_PGETBL_SIZE_512KB: + size = 512; + break; + case I965_PGETBL_SIZE_1MB: + size = 1024; + break; + case I965_PGETBL_SIZE_2MB: + size = 2048; + break; + case I965_PGETBL_SIZE_1_5MB: + size = 1024 + 512; + break; + default: + dev_info(&intel_private.pcidev->dev, + "unknown page table size, assuming 512KB\n"); + size = 512; + } + size += 4; /* add in BIOS popup space */ + } else if (IS_G33 && !IS_PINEVIEW) { + /* G33's GTT size defined in gmch_ctrl */ + switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { + case G33_PGETBL_SIZE_1M: + size = 1024; + break; + case G33_PGETBL_SIZE_2M: + size = 2048; + break; + default: + dev_info(&intel_private.bridge_dev->dev, + "unknown page table size 0x%x, assuming 512KB\n", + (gmch_ctrl & G33_PGETBL_SIZE_MASK)); + size = 512; + } + size += 4; + } else if (IS_G4X || IS_PINEVIEW) { + /* On 4 series hardware, GTT stolen is separate from graphics + * stolen, ignore it in stolen gtt entries counting. However, + * 4KB of the stolen memory doesn't get mapped to the GTT. + */ + size = 4; + } else { + /* On previous hardware, the GTT size was just what was + * required to map the aperture. 
+ */ + size = agp_bridge->driver->fetch_size() + 4; + } + + return size/KB(4); +} +#endif + static unsigned int intel_gtt_mappable_entries(void) { unsigned int aperture_size; From ffdd7510b0bd5ec663b6b11b39810574f2ce3111 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Aug 2010 17:51:29 +0200 Subject: [PATCH 091/476] intel-gtt: s/i8[13]0/fake_agp for generic functions Start to separate the fake agp driver from the rest of intel-gtt.c Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 57 ++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 04e052e3f3da..56bcf27bdb77 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -292,7 +292,7 @@ static void intel_i810_cleanup(void) iounmap(intel_private.registers); } -static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) +static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { return; } @@ -522,7 +522,7 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } -static struct aper_size_info_fixed intel_i830_sizes[] = +static struct aper_size_info_fixed intel_fake_agp_sizes[] = { {128, 32768, 5}, /* The 64M mode still requires a 128k gatt */ @@ -812,14 +812,14 @@ static int intel_fake_agp_fetch_size(void) { unsigned int aper_size; int i; - int num_sizes = ARRAY_SIZE(intel_i830_sizes); + int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) / MB(1); for (i = 0; i < num_sizes; i++) { - if (aper_size == intel_i830_sizes[i].size) { - agp_bridge->current_size = intel_i830_sizes + i; + if (aper_size == intel_fake_agp_sizes[i].size) { + agp_bridge->current_size = intel_fake_agp_sizes + i; return aper_size; } } @@ -913,7 +913,7 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) /* Return the gatt table to a sane state. Use the top of stolen * memory for the GTT. 
*/ -static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) +static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) { return 0; } @@ -1034,7 +1034,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; } -static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) +static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, + int type) { if (type == AGP_PHYS_MEMORY) return alloc_agpphysmem_i8xx(pg_count, type); @@ -1484,7 +1485,7 @@ static const struct agp_bridge_driver intel_810_driver = { .cleanup = intel_i810_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, @@ -1501,7 +1502,7 @@ static const struct agp_bridge_driver intel_810_driver = { static const struct agp_bridge_driver intel_830_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, + .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, @@ -1510,13 +1511,13 @@ static const struct agp_bridge_driver intel_830_driver = { .cleanup = intel_i830_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i830_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i830_insert_entries, .remove_memory = intel_i830_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, @@ -1528,7 +1529,7 @@ static const struct agp_bridge_driver intel_830_driver = { static const struct agp_bridge_driver intel_915_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, + .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, @@ -1537,13 +1538,13 @@ static const struct agp_bridge_driver intel_915_driver = { .cleanup = intel_i915_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i915_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, @@ -1561,7 +1562,7 @@ static const struct agp_bridge_driver intel_915_driver = { static const struct agp_bridge_driver intel_i965_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, + .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, @@ -1570,13 +1571,13 @@ static const struct agp_bridge_driver intel_i965_driver = { .cleanup = intel_i915_cleanup, .mask_memory = intel_i965_mask_memory, .masks = 
intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i965_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, @@ -1594,7 +1595,7 @@ static const struct agp_bridge_driver intel_i965_driver = { static const struct agp_bridge_driver intel_gen6_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, + .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, @@ -1603,13 +1604,13 @@ static const struct agp_bridge_driver intel_gen6_driver = { .cleanup = intel_i915_cleanup, .mask_memory = intel_gen6_mask_memory, .masks = intel_gen6_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i965_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, @@ -1627,7 +1628,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { static const struct agp_bridge_driver intel_g33_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, + .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, @@ -1636,13 +1637,13 @@ static const struct agp_bridge_driver intel_g33_driver = { .cleanup = intel_i915_cleanup, .mask_memory = intel_i965_mask_memory, .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i915_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, From e5e408fc94595aab897f613b6f4e2f5b36870a6f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 28 Aug 2010 11:04:32 +0200 Subject: [PATCH 092/476] intel-gtt: fix gtt_total_entries detection In commit f1befe71 Chris Wilson added some code to clear the full gtt on g33/pineview instead of just the mappable part. The code looks like it was copy-pasted from agp/intel-gtt.c, at least an identical piece of code is still there (in intel_i830_init_gtt_entries). This lead to a regression in 2.6.35 which was supposedly fixed in commit e7b96f28 Now this commit makes absolutely no sense to me. It seems to be slightly confused about chipset generations - it references docs for 4th gen but the regression concerns 3rd gen g33. 
Luckily the the g33 gmch docs are available with the GMCH Graphics Control pci config register definitions. The other (bigger problem) is that the new check in there uses the i830 stolen mem bits (.5M, 1M or 8M of stolen mem). They are different since the i855GM. The most likely case is that it hits the 512M fallback, which was probably the right thing for the boxes this was tested on. So the original approach by Chris Wilson seems to be wrong and the current code is definitely wrong. There is a third approach by Jesse Barnes from his RFC patch "Who wants a bigger GTT mapping range?" where he simply shoves g33 in the same clause like later chipset generations. I've asked him and Jesse confirmed that this should work. So implement it. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16891$ Tested-by: Anisse Astier Cc: stable@kernel.org Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 50 ++++++++---------------------------- 1 file changed, 11 insertions(+), 39 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 56bcf27bdb77..3b84d8445a43 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -699,71 +699,43 @@ static unsigned int intel_gtt_stolen_entries(void) static unsigned int intel_gtt_total_entries(void) { int size; - u16 gmch_ctrl; - if (IS_I965) { + if (IS_G33 || IS_I965 || IS_G4X) { u32 pgetbl_ctl; pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); - /* The 965 has a field telling us the size of the GTT, - * which may be larger than what is necessary to map the - * aperture. - */ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { case I965_PGETBL_SIZE_128KB: - size = 128; + size = KB(128); break; case I965_PGETBL_SIZE_256KB: - size = 256; + size = KB(256); break; case I965_PGETBL_SIZE_512KB: - size = 512; + size = KB(512); break; case I965_PGETBL_SIZE_1MB: - size = 1024; + size = KB(1024); break; case I965_PGETBL_SIZE_2MB: - size = 2048; + size = KB(2048); break; case I965_PGETBL_SIZE_1_5MB: - size = 1024 + 512; + size = KB(1024 + 512); break; default: dev_info(&intel_private.pcidev->dev, "unknown page table size, assuming 512KB\n"); - size = 512; + size = KB(512); } - size += 4; /* add in BIOS popup space */ - } else if (IS_G33 && !IS_PINEVIEW) { - /* G33's GTT size defined in gmch_ctrl */ - switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { - case G33_PGETBL_SIZE_1M: - size = 1024; - break; - case G33_PGETBL_SIZE_2M: - size = 2048; - break; - default: - dev_info(&intel_private.bridge_dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & G33_PGETBL_SIZE_MASK)); - size = 512; - } - size += 4; - } else if (IS_G4X || IS_PINEVIEW) { - /* On 4 series hardware, GTT stolen is separate from graphics - * stolen, ignore it in stolen gtt entries counting. However, - * 4KB of the stolen memory doesn't get mapped to the GTT. - */ - size = 4; + + return size/4; } else { /* On previous hardware, the GTT size was just what was * required to map the aperture. */ - size = agp_bridge->driver->fetch_size() + 4; + return intel_private.base.gtt_mappable_entries; } - - return size/KB(4); } #endif From 1a997ff2a0089a07a5494545d31f4366742dea43 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 8 Sep 2010 21:18:53 +0200 Subject: [PATCH 093/476] intel-gtt: introduce intel_gtt_driver Same idea as INTEL_INFO from drm/i915. 
This - reduces the dependancy on agp_driver - stops the what-does-IS_I965G-mean confusion (here it's just gen4, in drm/i915 it's gen >=4) - further prepares the separation of the fake agp driver from the rest. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-agp.h | 40 --------- drivers/char/agp/intel-gtt.c | 167 +++++++++++++++++++++++++---------- 2 files changed, 120 insertions(+), 87 deletions(-) diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index bf03afc57cb7..ef7ca9756dc5 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -213,46 +213,6 @@ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A -/* cover 915 and 945 variants */ -#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) - -#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) - -#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) - -#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) - -#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB) - -#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ - IS_SNB) - int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge); void intel_gmch_remove(struct pci_dev *pdev); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 3b84d8445a43..831f3c527bdf 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -81,8 +81,16 @@ static struct gatt_mask intel_gen6_masks[] = .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT }, }; +struct intel_gtt_driver { + unsigned int gen : 8; + unsigned int is_g33 : 1; + unsigned int is_pineview : 1; + unsigned int is_ironlake : 1; +}; + static 
struct _intel_private { struct intel_gtt base; + const struct intel_gtt_driver *driver; struct pci_dev *pcidev; /* device one */ struct pci_dev *bridge_dev; u8 __iomem *registers; @@ -97,6 +105,11 @@ static struct _intel_private { int resource_valid; } intel_private; +#define INTEL_GTT_GEN intel_private.driver->gen +#define IS_G33 intel_private.driver->is_g33 +#define IS_PINEVIEW intel_private.driver->is_pineview +#define IS_IRONLAKE intel_private.driver->is_ironlake + #ifdef USE_PCI_DMA_API static int intel_agp_map_page(struct page *page, dma_addr_t *ret) { @@ -543,7 +556,7 @@ static unsigned int intel_gtt_stolen_entries(void) pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - if (IS_G4X || IS_PINEVIEW) + if (INTEL_GTT_GEN > 4 || IS_PINEVIEW) overhead_entries = 0; else overhead_entries = intel_private.base.gtt_mappable_entries @@ -573,7 +586,7 @@ static unsigned int intel_gtt_stolen_entries(void) stolen_size = 0; break; } - } else if (IS_SNB) { + } else if (INTEL_GTT_GEN == 6) { /* * SandyBridge has new memory control reg at 0x50.w */ @@ -700,7 +713,7 @@ static unsigned int intel_gtt_total_entries(void) { int size; - if (IS_G33 || IS_I965 || IS_G4X) { + if (IS_G33 || INTEL_GTT_GEN >= 4) { u32 pgetbl_ctl; pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); @@ -1086,7 +1099,7 @@ static void intel_i9xx_setup_flush(void) if (intel_private.ifp_resource.start) return; - if (IS_SNB) + if (INTEL_GTT_GEN == 6) return; /* setup a resource for this object */ @@ -1094,7 +1107,7 @@ static void intel_i9xx_setup_flush(void) intel_private.ifp_resource.flags = IORESOURCE_MEM; /* Setup chipset flush for 915 */ - if (IS_I965 || IS_G33 || IS_G4X) { + if (IS_G33 || INTEL_GTT_GEN >= 4) { intel_i965_g33_setup_chipset_flush(); } else { intel_i915_setup_chipset_flush(); @@ -1196,7 +1209,8 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && + if (INTEL_GTT_GEN != 6 && mask_type != 0 && + mask_type != AGP_PHYS_MEMORY && mask_type != INTEL_AGP_CACHED_MEMORY) goto out_err; @@ -1631,6 +1645,34 @@ static const struct agp_bridge_driver intel_g33_driver = { #endif }; +static const struct intel_gtt_driver i8xx_gtt_driver = { + .gen = 2, +}; +static const struct intel_gtt_driver i915_gtt_driver = { + .gen = 3, +}; +static const struct intel_gtt_driver g33_gtt_driver = { + .gen = 3, + .is_g33 = 1, +}; +static const struct intel_gtt_driver pineview_gtt_driver = { + .gen = 3, + .is_pineview = 1, .is_g33 = 1, +}; +static const struct intel_gtt_driver i965_gtt_driver = { + .gen = 4, +}; +static const struct intel_gtt_driver g4x_gtt_driver = { + .gen = 5, +}; +static const struct intel_gtt_driver ironlake_gtt_driver = { + .gen = 5, + .is_ironlake = 1, +}; +static const struct intel_gtt_driver sandybridge_gtt_driver = { + .gen = 6, +}; + /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of * driver and gmch_driver must be non-null, and find_gmch will determine * which one should be used if a gmch_chip_id is present. 
@@ -1639,57 +1681,86 @@ static const struct intel_gtt_driver_description { unsigned int gmch_chip_id; char *name; const struct agp_bridge_driver *gmch_driver; + const struct intel_gtt_driver *gtt_driver; } intel_gtt_chipsets[] = { - { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver }, - { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver }, - { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver }, - { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver , NULL}, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver , NULL}, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver , NULL}, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver , NULL}, + { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", + &intel_830_driver , &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", + &intel_830_driver , &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82854_IG, "854", + &intel_830_driver , &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", + &intel_830_driver , &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82865_IG, "865", + &intel_830_driver , &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", + &intel_915_driver , &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", + &intel_915_driver , &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", + &intel_915_driver , &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", + &intel_915_driver , &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", + &intel_915_driver , &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", + &intel_915_driver , &i915_gtt_driver }, + { 
PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", + &intel_i965_driver , &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G33_IG, "G33", + &intel_g33_driver , &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", + &intel_g33_driver , &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", + &intel_g33_driver , &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", + &intel_g33_driver , &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", + &intel_g33_driver , &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", + &intel_i965_driver , &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", + &intel_i965_driver , &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", + &intel_i965_driver , &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", + &intel_i965_driver , &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_B43_IG, "B43", + &intel_i965_driver , &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G41_IG, "G41", + &intel_i965_driver , &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", &intel_i965_driver }, + "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", &intel_i965_driver }, + "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, - "Sandybridge", &intel_gen6_driver }, + "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, { 0, NULL, NULL } }; @@ -1720,6 +1791,8 @@ int intel_gmch_probe(struct pci_dev *pdev, if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { bridge->driver = intel_gtt_chipsets[i].gmch_driver; + intel_private.driver = + intel_gtt_chipsets[i].gtt_driver; break; } } From ccc4e67be5ac1bd38c4bfd61aca38366597e8afb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 8 Sep 2010 21:20:12 +0200 Subject: [PATCH 094/476] intel-gtt: i915: use detected gtt size for mapping Slight reordering of the init sequence required. 
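Roughly, the reordered sequence maps the chipset MMIO registers first, lets intel_gtt_total_entries() read the size out of I810_PGETBL_CTL, and only then ioremaps the GTT with exactly that many entries. Illustrative sketch of the ordering only (error handling trimmed, not the literal hunk below):

    intel_private.registers = ioremap(temp, 128 * 4096);              /* MMIO first */
    intel_private.base.gtt_total_entries = intel_gtt_total_entries(); /* reads PGETBL_CTL */
    gtt_map_size = intel_private.base.gtt_total_entries * 4;          /* 4 bytes per PTE */
    intel_private.gtt = ioremap(temp2, gtt_map_size);                 /* map only what exists */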
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 60 ++++++------------------------ 1 file changed, 9 insertions(+), 51 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 831f3c527bdf..3d93cd0acc01 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -708,7 +708,6 @@ static unsigned int intel_gtt_stolen_entries(void) return stolen_entries; } -#if 0 /* extracted code in bad shape, needs some cleaning before use */ static unsigned int intel_gtt_total_entries(void) { int size; @@ -750,7 +749,6 @@ static unsigned int intel_gtt_total_entries(void) return intel_private.base.gtt_mappable_entries; } } -#endif static unsigned int intel_gtt_mappable_entries(void) { @@ -1248,45 +1246,6 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; } -/* Return the aperture size by just checking the resource length. The effect - * described in the spec of the MSAC registers is just changing of the - * resource size. - */ -static int intel_i915_get_gtt_size(void) -{ - int size; - - if (IS_G33) { - u16 gmch_ctrl; - - /* G33's GTT size defined in gmch_ctrl */ - pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - switch (gmch_ctrl & I830_GMCH_GMS_MASK) { - case I830_GMCH_GMS_STOLEN_512: - size = 512; - break; - case I830_GMCH_GMS_STOLEN_1024: - size = 1024; - break; - case I830_GMCH_GMS_STOLEN_8192: - size = 8*1024; - break; - default: - dev_info(&intel_private.bridge_dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & I830_GMCH_GMS_MASK)); - size = 512; - } - } else { - /* On previous hardware, the GTT size was just what was - * required to map the aperture. - */ - size = agp_bridge->driver->fetch_size(); - } - - return KB(size); -} - /* The intel i915 automatically initializes the agp aperture during POST. * Use the memory already set aside for in the GTT. */ @@ -1306,19 +1265,18 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); - gtt_map_size = intel_i915_get_gtt_size(); - - intel_private.gtt = ioremap(temp2, gtt_map_size); - if (!intel_private.gtt) - return -ENOMEM; - - intel_private.base.gtt_total_entries = gtt_map_size / 4; - temp &= 0xfff80000; intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { - iounmap(intel_private.gtt); + if (!intel_private.registers) + return -ENOMEM; + + intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + gtt_map_size = intel_private.base.gtt_total_entries * 4; + + intel_private.gtt = ioremap(temp2, gtt_map_size); + if (!intel_private.gtt) { + iounmap(intel_private.registers); return -ENOMEM; } From 210b23c2f7b9721afb0a57459b7dbac3b094862e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 28 Aug 2010 16:14:32 +0200 Subject: [PATCH 095/476] intel-gtt: i965: use detected gtt size for mapping Also move the Sandybridge size detection into gtt_total_entries, like the rest.
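For Sandybridge this means the SNB_GMCH_CTRL decode now lives in intel_gtt_total_entries() as well, returning entries rather than bytes. Compressed sketch of that branch (mirrors the hunk below; error reporting trimmed):

    u16 snb_gmch_ctl;

    pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
    switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
    case SNB_GTT_SIZE_1M: size = MB(1); break;
    case SNB_GTT_SIZE_2M: size = MB(2); break;
    default:              size = MB(0); break; /* bad size mask, warn */
    }
    return size / 4; /* one 4-byte PTE per entry */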
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 76 ++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 42 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 3d93cd0acc01..cd0fd1479e5d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -712,7 +712,7 @@ static unsigned int intel_gtt_total_entries(void) { int size; - if (IS_G33 || INTEL_GTT_GEN >= 4) { + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) { u32 pgetbl_ctl; pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); @@ -741,6 +741,24 @@ static unsigned int intel_gtt_total_entries(void) size = KB(512); } + return size/4; + } else if (INTEL_GTT_GEN == 6) { + u16 snb_gmch_ctl; + + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { + default: + case SNB_GTT_SIZE_0M: + printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); + size = MB(0); + break; + case SNB_GTT_SIZE_1M: + size = MB(1); + break; + case SNB_GTT_SIZE_2M: + size = MB(2); + break; + } return size/4; } else { /* On previous hardware, the GTT size was just what was @@ -1327,44 +1345,18 @@ static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) { - u16 snb_gmch_ctl; - - switch (intel_private.bridge_dev->device) { - case PCI_DEVICE_ID_INTEL_GM45_HB: - case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: - case PCI_DEVICE_ID_INTEL_Q45_HB: - case PCI_DEVICE_ID_INTEL_G45_HB: - case PCI_DEVICE_ID_INTEL_G41_HB: - case PCI_DEVICE_ID_INTEL_B43_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: - *gtt_offset = *gtt_size = MB(2); - break; - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB: + switch (INTEL_GTT_GEN) { + case 5: + case 6: *gtt_offset = MB(2); - - pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); - switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { - default: - case SNB_GTT_SIZE_0M: - printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); - *gtt_size = MB(0); - break; - case SNB_GTT_SIZE_1M: - *gtt_size = MB(1); - break; - case SNB_GTT_SIZE_2M: - *gtt_size = MB(2); - break; - } break; + case 4: default: - *gtt_offset = *gtt_size = KB(512); + *gtt_offset = KB(512); + break; } + + *gtt_size = intel_private.base.gtt_total_entries * 4; } /* The intel i965 automatically initializes the agp aperture during POST. 
@@ -1387,17 +1379,17 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) temp &= 0xfff00000; + intel_private.registers = ioremap(temp, 128 * 4096); + if (!intel_private.registers) + return -ENOMEM; + + intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + intel_i965_get_gtt_range(>t_offset, >t_size); intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); - if (!intel_private.gtt) - return -ENOMEM; - - intel_private.base.gtt_total_entries = gtt_size / 4; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { + if (!intel_private.gtt) { iounmap(intel_private.gtt); return -ENOMEM; } From fdfb58a965486d2afea4ef0f9b8153dab9b98b2e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 29 Aug 2010 00:15:03 +0200 Subject: [PATCH 096/476] intel-gtt: i830: adjust ioremap of regs and gtt to i9xx This way around this can be extracted into common code. Also use a common cleanup function (and give it a generic name). Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 41 ++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index cd0fd1479e5d..7359fbe94428 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -883,6 +883,7 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) int page_order, ret; struct aper_size_info_fixed *size; int num_entries; + int gtt_map_size; u32 temp; size = agp_bridge->current_size; @@ -893,10 +894,19 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); temp &= 0xfff80000; - intel_private.registers = ioremap(temp, 128 * 4096); + intel_private.registers = ioremap(temp, KB(64)); if (!intel_private.registers) return -ENOMEM; + intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + gtt_map_size = intel_private.base.gtt_total_entries * 4; + + intel_private.gtt = ioremap(temp + I810_PTE_BASE, gtt_map_size); + if (!intel_private.gtt) { + iounmap(intel_private.registers); + return -ENOMEM; + } + temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; global_cache_flush(); /* FIXME: ?? */ @@ -940,9 +950,9 @@ static int intel_i830_configure(void) if (agp_bridge->driver->needs_scratch_page) { for (i = intel_private.base.gtt_stolen_entries; i < current_size->num_entries; i++) { - writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); + writel(agp_bridge->scratch_page, intel_private.gtt+i); } - readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ + readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ } global_cache_flush(); @@ -951,11 +961,6 @@ static int intel_i830_configure(void) return 0; } -static void intel_i830_cleanup(void) -{ - iounmap(intel_private.registers); -} - static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -1002,9 +1007,9 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mask_type), - intel_private.registers+I810_PTE_BASE+(j*4)); + intel_private.gtt+j); } - readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); + readl(intel_private.gtt+j-1); out: ret = 0; @@ -1028,9 +1033,9 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, } for (i = pg_start; i < (mem->page_count + pg_start); i++) { - writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); + writel(agp_bridge->scratch_page, intel_private.gtt+i); } - readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); + readl(intel_private.gtt+i-1); return 0; } @@ -1171,7 +1176,7 @@ static int intel_i9xx_configure(void) return 0; } -static void intel_i915_cleanup(void) +static void intel_gtt_cleanup(void) { if (intel_private.i9xx_flush_page) iounmap(intel_private.i9xx_flush_page); @@ -1444,7 +1449,7 @@ static const struct agp_bridge_driver intel_830_driver = { .needs_scratch_page = true, .configure = intel_i830_configure, .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_i830_cleanup, + .cleanup = intel_gtt_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, @@ -1471,7 +1476,7 @@ static const struct agp_bridge_driver intel_915_driver = { .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_i915_cleanup, + .cleanup = intel_gtt_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, @@ -1504,7 +1509,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_i915_cleanup, + .cleanup = intel_gtt_cleanup, .mask_memory = intel_i965_mask_memory, .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, @@ -1537,7 +1542,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_i915_cleanup, + .cleanup = intel_gtt_cleanup, .mask_memory = intel_gen6_mask_memory, .masks = intel_gen6_masks, .agp_enable = intel_fake_agp_enable, @@ -1570,7 +1575,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_i915_cleanup, + .cleanup = intel_gtt_cleanup, .mask_memory = intel_i965_mask_memory, .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, From f67eab664c47b261517b09812477de9a1780b426 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 29 Aug 2010 17:27:36 +0200 Subject: [PATCH 097/476] intel-gtt: consolidate the gtt ioremap calls Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 70 ++++++++++++++---------------------- 1 file changed, 26 insertions(+), 44 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 
7359fbe94428..73082ef09dc1 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -94,6 +94,7 @@ static struct _intel_private { struct pci_dev *pcidev; /* device one */ struct pci_dev *bridge_dev; u8 __iomem *registers; + phys_addr_t gtt_bus_addr; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; union { @@ -799,10 +800,27 @@ static unsigned int intel_gtt_mappable_entries(void) static int intel_gtt_init(void) { + u32 gtt_map_size; + + intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); + intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + + gtt_map_size = intel_private.base.gtt_total_entries * 4; + + intel_private.gtt = ioremap(intel_private.gtt_bus_addr, + gtt_map_size); + if (!intel_private.gtt) { + iounmap(intel_private.registers); + return -ENOMEM; + } + + global_cache_flush(); /* FIXME: ? */ + /* we have to call this as early as possible after the MMIO base address is known */ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); if (intel_private.base.gtt_stolen_entries == 0) { iounmap(intel_private.registers); + iounmap(intel_private.gtt); return -ENOMEM; } @@ -883,7 +901,6 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) int page_order, ret; struct aper_size_info_fixed *size; int num_entries; - int gtt_map_size; u32 temp; size = agp_bridge->current_size; @@ -898,17 +915,8 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) if (!intel_private.registers) return -ENOMEM; - intel_private.base.gtt_total_entries = intel_gtt_total_entries(); - gtt_map_size = intel_private.base.gtt_total_entries * 4; - - intel_private.gtt = ioremap(temp + I810_PTE_BASE, gtt_map_size); - if (!intel_private.gtt) { - iounmap(intel_private.registers); - return -ENOMEM; - } - + intel_private.gtt_bus_addr = temp + I810_PTE_BASE; temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - global_cache_flush(); /* FIXME: ?? */ ret = intel_gtt_init(); if (ret != 0) @@ -1278,7 +1286,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) struct aper_size_info_fixed *size; int num_entries; u32 temp, temp2; - int gtt_map_size; size = agp_bridge->current_size; page_order = size->page_order; @@ -1294,23 +1301,12 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) if (!intel_private.registers) return -ENOMEM; - intel_private.base.gtt_total_entries = intel_gtt_total_entries(); - gtt_map_size = intel_private.base.gtt_total_entries * 4; - - intel_private.gtt = ioremap(temp2, gtt_map_size); - if (!intel_private.gtt) { - iounmap(intel_private.registers); - return -ENOMEM; - } - + intel_private.gtt_bus_addr = temp2; temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - global_cache_flush(); /* FIXME: ? */ ret = intel_gtt_init(); - if (ret != 0) { - iounmap(intel_private.gtt); + if (ret != 0) return ret; - } agp_bridge->gatt_table = NULL; @@ -1348,7 +1344,7 @@ static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } -static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) +static void intel_i965_get_gtt_range(int *gtt_offset) { switch (INTEL_GTT_GEN) { case 5: @@ -1360,8 +1356,6 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) *gtt_offset = KB(512); break; } - - *gtt_size = intel_private.base.gtt_total_entries * 4; } /* The intel i965 automatically initializes the agp aperture during POST. 
@@ -1373,7 +1367,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) struct aper_size_info_fixed *size; int num_entries; u32 temp; - int gtt_offset, gtt_size; + int gtt_offset; size = agp_bridge->current_size; page_order = size->page_order; @@ -1388,25 +1382,13 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) if (!intel_private.registers) return -ENOMEM; - intel_private.base.gtt_total_entries = intel_gtt_total_entries(); - - intel_i965_get_gtt_range(>t_offset, >t_size); - - intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); - - if (!intel_private.gtt) { - iounmap(intel_private.gtt); - return -ENOMEM; - } - + intel_i965_get_gtt_range(>t_offset); + intel_private.gtt_bus_addr = temp + gtt_offset; temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - global_cache_flush(); /* FIXME: ? */ ret = intel_gtt_init(); - if (ret != 0) { - iounmap(intel_private.gtt); + if (ret != 0) return ret; - } agp_bridge->gatt_table = NULL; From 73800422a30e9b8b6e0e49c27af9e9d196e52fd9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 29 Aug 2010 17:29:50 +0200 Subject: [PATCH 098/476] intel-gtt: consolidate i830 setup Slighlty reordered sequence was necessary. Also don't set agp_bridge->gatt_bus_addr anymore. Only used by generic agp helper functions, hence unnecessary for the intel fake agp driver. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 85 +++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 35 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 73082ef09dc1..fd977aa4a17d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -86,6 +86,8 @@ struct intel_gtt_driver { unsigned int is_g33 : 1; unsigned int is_pineview : 1; unsigned int is_ironlake : 1; + /* Chipset specific GTT setup */ + int (*setup)(void); }; static struct _intel_private { @@ -95,6 +97,7 @@ static struct _intel_private { struct pci_dev *bridge_dev; u8 __iomem *registers; phys_addr_t gtt_bus_addr; + phys_addr_t gma_bus_addr; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; union { @@ -893,38 +896,60 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) printk(KERN_ERR "Timed out waiting for cache flush.\n"); } +static void intel_enable_gtt(void) +{ + u32 ptetbl_addr, gma_addr; + u16 gmch_ctrl; + + ptetbl_addr = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; + + pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &gma_addr); + intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); + gmch_ctrl |= I830_GMCH_ENABLED; + pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); + + writel(ptetbl_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); + readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ +} + +static int i830_setup(void) +{ + u32 reg_addr; + + pci_read_config_dword(intel_private.pcidev, I810_MMADDR, ®_addr); + reg_addr &= 0xfff80000; + + intel_private.registers = ioremap(reg_addr, KB(64)); + if (!intel_private.registers) + return -ENOMEM; + + intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; + + intel_i830_setup_flush(); + + return 0; +} + /* The intel i830 automatically initializes the agp aperture during POST. * Use the memory already set aside for in the GTT. 
*/ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) { - int page_order, ret; - struct aper_size_info_fixed *size; - int num_entries; - u32 temp; + int ret; - size = agp_bridge->current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge->gatt_table_real = NULL; - - pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); - temp &= 0xfff80000; - - intel_private.registers = ioremap(temp, KB(64)); - if (!intel_private.registers) - return -ENOMEM; - - intel_private.gtt_bus_addr = temp + I810_PTE_BASE; - temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; + ret = intel_private.driver->setup(); + if (ret != 0) + return ret; ret = intel_gtt_init(); if (ret != 0) return ret; + agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; - - agp_bridge->gatt_bus_addr = temp; + agp_bridge->gatt_bus_addr = 0; return 0; } @@ -939,25 +964,15 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) static int intel_i830_configure(void) { - struct aper_size_info_fixed *current_size; - u32 temp; - u16 gmch_ctrl; int i; - current_size = A_SIZE_FIX(agp_bridge->current_size); + intel_enable_gtt(); - pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); - agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); - - writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); - readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ + agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; if (agp_bridge->driver->needs_scratch_page) { - for (i = intel_private.base.gtt_stolen_entries; i < current_size->num_entries; i++) { + for (i = intel_private.base.gtt_stolen_entries; + i < intel_private.base.gtt_total_entries; i++) { writel(agp_bridge->scratch_page, intel_private.gtt+i); } readl(intel_private.gtt+i-1); /* PCI Posting. */ @@ -965,7 +980,6 @@ static int intel_i830_configure(void) global_cache_flush(); - intel_i830_setup_flush(); return 0; } @@ -1584,6 +1598,7 @@ static const struct agp_bridge_driver intel_g33_driver = { static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, + .setup = i830_setup, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, From 2d2430cf9bf9e8b0ad9ea34a103625f4fe7e4477 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 29 Aug 2010 17:35:30 +0200 Subject: [PATCH 099/476] intel-gtt: consolidate i9xx setup The only difference between i915 and i965 was the calculation of the gtt address. So merge these two paths into one. Otherwise the same changes as in the i830 setup consolidation. 
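The part that actually differs is small enough to fold into one helper: gen3 parts publish the GTT base through their own PCI config register, while gen4 and later put the GTT at a fixed offset inside the MMIO BAR. Simplified sketch of the merged decision (condensed from the i9xx_setup() hunk below):

    if (INTEL_GTT_GEN == 3) {
        /* separate PTE base address register */
        pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
    } else {
        /* GTT sits inside the MMIO BAR: 512KB in on gen4, 2MB in on gen5/6 */
        intel_private.gtt_bus_addr = reg_addr +
            (INTEL_GTT_GEN >= 5 ? MB(2) : KB(512));
    }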
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 156 ++++++++++++++--------------------- 1 file changed, 62 insertions(+), 94 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fd977aa4a17d..7ac7d5cb3dc1 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -903,7 +903,13 @@ static void intel_enable_gtt(void) ptetbl_addr = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &gma_addr); + if (INTEL_GTT_GEN == 2) + pci_read_config_dword(intel_private.pcidev, I810_GMADDR, + &gma_addr); + else + pci_read_config_dword(intel_private.pcidev, I915_GMADDR, + &gma_addr); + intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); @@ -1165,23 +1171,11 @@ static void intel_i9xx_setup_flush(void) static int intel_i9xx_configure(void) { - struct aper_size_info_fixed *current_size; - u32 temp; - u16 gmch_ctrl; int i; - current_size = A_SIZE_FIX(agp_bridge->current_size); + intel_enable_gtt(); - pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); - - agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); - - writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); - readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ + agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; if (agp_bridge->driver->needs_scratch_page) { for (i = intel_private.base.gtt_stolen_entries; i < @@ -1193,8 +1187,6 @@ static int intel_i9xx_configure(void) global_cache_flush(); - intel_i9xx_setup_flush(); - return 0; } @@ -1291,40 +1283,62 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; } +static int i9xx_setup(void) +{ + u32 reg_addr; + + pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); + + reg_addr &= 0xfff80000; + + intel_private.registers = ioremap(reg_addr, 128 * 4096); + if (!intel_private.registers) + return -ENOMEM; + + if (INTEL_GTT_GEN == 3) { + u32 gtt_addr; + pci_read_config_dword(intel_private.pcidev, + I915_PTEADDR, >t_addr); + intel_private.gtt_bus_addr = gtt_addr; + } else { + u32 gtt_offset; + + switch (INTEL_GTT_GEN) { + case 5: + case 6: + gtt_offset = MB(2); + break; + case 4: + default: + gtt_offset = KB(512); + break; + } + intel_private.gtt_bus_addr = reg_addr + gtt_offset; + } + + intel_i9xx_setup_flush(); + + return 0; +} + /* The intel i915 automatically initializes the agp aperture during POST. * Use the memory already set aside for in the GTT. 
*/ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) { - int page_order, ret; - struct aper_size_info_fixed *size; - int num_entries; - u32 temp, temp2; + int ret; - size = agp_bridge->current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge->gatt_table_real = NULL; - - pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); - pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); - - temp &= 0xfff80000; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) - return -ENOMEM; - - intel_private.gtt_bus_addr = temp2; - temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; + ret = intel_private.driver->setup(); + if (ret != 0) + return ret; ret = intel_gtt_init(); if (ret != 0) return ret; + agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; - - agp_bridge->gatt_bus_addr = temp; + agp_bridge->gatt_bus_addr = 0; return 0; } @@ -1358,59 +1372,6 @@ static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } -static void intel_i965_get_gtt_range(int *gtt_offset) -{ - switch (INTEL_GTT_GEN) { - case 5: - case 6: - *gtt_offset = MB(2); - break; - case 4: - default: - *gtt_offset = KB(512); - break; - } -} - -/* The intel i965 automatically initializes the agp aperture during POST. - * Use the memory already set aside for in the GTT. - */ -static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) -{ - int page_order, ret; - struct aper_size_info_fixed *size; - int num_entries; - u32 temp; - int gtt_offset; - - size = agp_bridge->current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge->gatt_table_real = NULL; - - pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); - - temp &= 0xfff00000; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) - return -ENOMEM; - - intel_i965_get_gtt_range(>t_offset); - intel_private.gtt_bus_addr = temp + gtt_offset; - temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - - ret = intel_gtt_init(); - if (ret != 0) - return ret; - - agp_bridge->gatt_table = NULL; - - agp_bridge->gatt_bus_addr = temp; - - return 0; -} - static const struct agp_bridge_driver intel_810_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_i810_sizes, @@ -1510,7 +1471,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i965_create_gatt_table, + .create_gatt_table = intel_i915_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1543,7 +1504,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .masks = intel_gen6_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i965_create_gatt_table, + .create_gatt_table = intel_i915_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1602,27 +1563,34 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, + .setup = i9xx_setup, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, .is_g33 = 1, + .setup = i9xx_setup, }; 
static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, + .setup = i9xx_setup, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, + .setup = i9xx_setup, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, + .setup = i9xx_setup, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, + .setup = i9xx_setup, }; static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, + .setup = i9xx_setup, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of From 3b15a9d7cd59b7ec79f61aafabfbe84116561461 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 29 Aug 2010 14:18:49 +0200 Subject: [PATCH 100/476] intel-gtt: call intel_gtt_init in probe function This way the create_gatt_table callbacks become dummy glue functions for the fake agp driver - rename them accordingly (and kill the now unnecessary i9xx copy). With this change, the gtt initialization code is almost independent from the agp stuff. Two things are still missing: - the scratch page is created by the generic agp code. - filling the whole gtt with scratch_page ptes is not yet consolidated - this needs abstracted pte handling, first. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 60 +++++++++--------------------------- 1 file changed, 15 insertions(+), 45 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 7ac7d5cb3dc1..d7207e8092ab 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -804,6 +804,13 @@ static unsigned int intel_gtt_mappable_entries(void) static int intel_gtt_init(void) { u32 gtt_map_size; + int ret; + + intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); + + ret = intel_private.driver->setup(); + if (ret != 0) + return ret; intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); intel_private.base.gtt_total_entries = intel_gtt_total_entries(); @@ -938,21 +945,8 @@ static int i830_setup(void) return 0; } -/* The intel i830 automatically initializes the agp aperture during POST. - * Use the memory already set aside for in the GTT. - */ -static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) +static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) { - int ret; - - ret = intel_private.driver->setup(); - if (ret != 0) - return ret; - - ret = intel_gtt_init(); - if (ret != 0) - return ret; - agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; agp_bridge->gatt_bus_addr = 0; @@ -960,9 +954,6 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) return 0; } -/* Return the gatt table to a sane state. Use the top of stolen - * memory for the GTT. - */ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) { return 0; @@ -1321,28 +1312,6 @@ static int i9xx_setup(void) return 0; } -/* The intel i915 automatically initializes the agp aperture during POST. * Use the memory already set aside for in the GTT. 
- */ -static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) -{ - int ret; - - ret = intel_private.driver->setup(); - if (ret != 0) - return ret; - - ret = intel_gtt_init(); - if (ret != 0) - return ret; - - agp_bridge->gatt_table_real = NULL; - agp_bridge->gatt_table = NULL; - agp_bridge->gatt_bus_addr = 0; - - return 0; -} - /* * The i965 supports 36-bit physical addresses, but to keep * the format of the GTT the same, the bits that don't fit @@ -1411,7 +1380,7 @@ static const struct agp_bridge_driver intel_830_driver = { .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i830_create_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i830_insert_entries, .remove_memory = intel_i830_remove_entries, @@ -1438,7 +1407,7 @@ static const struct agp_bridge_driver intel_915_driver = { .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1471,7 +1440,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1504,7 +1473,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .masks = intel_gen6_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1537,7 +1506,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i915_insert_entries, .remove_memory = intel_i915_remove_entries, @@ -1744,7 +1713,8 @@ int intel_gmch_probe(struct pci_dev *pdev, if (bridge->driver == &intel_810_driver) return 1; - intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); + if (intel_gtt_init() != 0) + return 0; return 1; } From 239918f7a5ac118ecfe9c55a4cfd25d7767b674a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 31 Aug 2010 22:30:43 +0200 Subject: [PATCH 101/476] intel-gtt: use chipset generation number some more Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index d7207e8092ab..6eb64c19af0e 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -782,20 +782,14 @@ static unsigned int intel_gtt_mappable_entries(void) pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - 
switch (intel_private.pcidev->device) { - case PCI_DEVICE_ID_INTEL_82830_CGC: - case PCI_DEVICE_ID_INTEL_82845G_IG: - case PCI_DEVICE_ID_INTEL_82855GM_IG: - case PCI_DEVICE_ID_INTEL_82865_IG: + if (INTEL_GTT_GEN == 2) { if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) aperture_size *= 64; else aperture_size *= 128; - break; - default: + } else { /* 9xx supports large sizes, just look at the length */ aperture_size = pci_resource_len(intel_private.pcidev, 2); - break; } return aperture_size >> PAGE_SHIFT; From ac622a9cdb742cad90648d95f2c4877774518f19 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 8 Sep 2010 21:26:07 +0200 Subject: [PATCH 102/476] drm/i915: drop prealloc_start from i915_dma gtt init Not used and simply confusing. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 47228cb16901..a693b27f3df4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1000,8 +1000,7 @@ intel_teardown_mchbar(struct drm_device *dev) * how much was set aside so we can use it for our own purposes. */ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, - uint32_t *preallocated_size, - uint32_t *start) + uint32_t *preallocated_size) { struct drm_i915_private *dev_priv = dev->dev_private; u16 tmp = 0; @@ -1152,7 +1151,6 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, } *preallocated_size = stolen - overhead; - *start = overhead; return 0; } @@ -1362,7 +1360,6 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) } static int i915_load_modeset_init(struct drm_device *dev, - unsigned long prealloc_start, unsigned long prealloc_size, unsigned long agp_size) { @@ -2051,7 +2048,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) struct drm_i915_private *dev_priv; resource_size_t base, size; int ret = 0, mmio_bar; - uint32_t agp_size, prealloc_size, prealloc_start; + uint32_t agp_size, prealloc_size; /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -2110,7 +2107,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } - ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); + ret = i915_probe_agp(dev, &agp_size, &prealloc_size); if (ret) goto out_iomapfree; @@ -2202,8 +2199,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_detect_pch(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev, prealloc_start, - prealloc_size, agp_size); + ret = i915_load_modeset_init(dev, prealloc_size, agp_size); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_workqueue_free; From 19966754328d99ee003ddfc7a8c31ceb115483ac Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 6 Sep 2010 20:08:44 +0200 Subject: [PATCH 103/476] drm/i915: die, i915_probe_agp, die Use the detection from intel-gtt.ko instead. Hooray! Also move the stolen mem allocator to the other gtt stuff in dev_prv->mem. v2: Chris Wilson noted that my error handling was crap. Fix it. He also said that this fixes a problem on his i845. Indeed, i915_probe_agp misses a special case for i830/i845 stolen mem detection. 
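The replacement is tiny: instead of re-parsing the GMCH registers, i915 now asks intel-gtt.ko for the numbers it has already detected. Abridged from the i915_driver_load() hunk below:

    dev_priv->mm.gtt = intel_gtt_get();
    if (!dev_priv->mm.gtt) {
        DRM_ERROR("Failed to initialize GTT\n");
        ret = -ENODEV;
        goto out_iomapfree;
    }

    /* entry counts from intel-gtt, converted to bytes */
    prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;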
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=25476 Cc: stable@kernel.org Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 6 + drivers/gpu/drm/i915/i915_dma.c | 190 ++------------------------------ drivers/gpu/drm/i915/i915_drv.h | 7 +- include/drm/intel-gtt.h | 2 + 4 files changed, 25 insertions(+), 180 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 6eb64c19af0e..9cb7c98afb9c 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1714,6 +1714,12 @@ int intel_gmch_probe(struct pci_dev *pdev, } EXPORT_SYMBOL(intel_gmch_probe); +struct intel_gtt *intel_gtt_get(void) +{ + return &intel_private.base; +} +EXPORT_SYMBOL(intel_gtt_get); + void intel_gmch_remove(struct pci_dev *pdev) { if (intel_private.pcidev) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a693b27f3df4..428c75b466aa 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -989,172 +989,6 @@ intel_teardown_mchbar(struct drm_device *dev) release_resource(&dev_priv->mch_res); } -/** - * i915_probe_agp - get AGP bootup configuration - * @pdev: PCI device - * @aperture_size: returns AGP aperture configured size - * @preallocated_size: returns size of BIOS preallocated AGP space - * - * Since Intel integrated graphics are UMA, the BIOS has to set aside - * some RAM for the framebuffer at early boot. This code figures out - * how much was set aside so we can use it for our own purposes. - */ -static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, - uint32_t *preallocated_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 tmp = 0; - unsigned long overhead; - unsigned long stolen; - - /* Get the fb aperture size and "stolen" memory amount. */ - pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp); - - *aperture_size = 1024 * 1024; - *preallocated_size = 1024 * 1024; - - switch (dev->pdev->device) { - case PCI_DEVICE_ID_INTEL_82830_CGC: - case PCI_DEVICE_ID_INTEL_82845G_IG: - case PCI_DEVICE_ID_INTEL_82855GM_IG: - case PCI_DEVICE_ID_INTEL_82865_IG: - if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) - *aperture_size *= 64; - else - *aperture_size *= 128; - break; - default: - /* 9xx supports large sizes, just look at the length */ - *aperture_size = pci_resource_len(dev->pdev, 2); - break; - } - - /* - * Some of the preallocated space is taken by the GTT - * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 
- */ - if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) - overhead = 4096; - else - overhead = (*aperture_size / 1024) + 4096; - - if (IS_GEN6(dev)) { - /* SNB has memory control reg at 0x50.w */ - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); - - switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { - case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; - case SNB_GMCH_GMS_STOLEN_32M: - stolen = 32 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_64M: - stolen = 64 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_96M: - stolen = 96 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_128M: - stolen = 128 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_160M: - stolen = 160 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_192M: - stolen = 192 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_224M: - stolen = 224 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_256M: - stolen = 256 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_288M: - stolen = 288 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_320M: - stolen = 320 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_352M: - stolen = 352 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_384M: - stolen = 384 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_416M: - stolen = 416 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_448M: - stolen = 448 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_480M: - stolen = 480 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_512M: - stolen = 512 * 1024 * 1024; - break; - default: - DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", - tmp & SNB_GMCH_GMS_STOLEN_MASK); - return -1; - } - } else { - switch (tmp & INTEL_GMCH_GMS_MASK) { - case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; - case INTEL_855_GMCH_GMS_STOLEN_1M: - stolen = 1 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_4M: - stolen = 4 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_8M: - stolen = 8 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_16M: - stolen = 16 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_32M: - stolen = 32 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_48M: - stolen = 48 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_64M: - stolen = 64 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_128M: - stolen = 128 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_256M: - stolen = 256 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_96M: - stolen = 96 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_160M: - stolen = 160 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_224M: - stolen = 224 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_352M: - stolen = 352 * 1024 * 1024; - break; - default: - DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", - tmp & INTEL_GMCH_GMS_MASK); - return -1; - } - } - - *preallocated_size = stolen - overhead; - - return 0; -} - #define PTE_ADDRESS_MASK 0xfffff000 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) @@ -1249,7 +1083,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) unsigned long ll_base = 0; /* Leave 1M for line length buffer & misc. 
*/ - compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); + compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); if (!compressed_fb) { dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; i915_warn_stolen(dev); @@ -1270,7 +1104,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) } if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { - compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, + compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, 4096, 0); if (!compressed_llb) { i915_warn_stolen(dev); @@ -1366,8 +1200,8 @@ static int i915_load_modeset_init(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - /* Basic memrange allocator for stolen space (aka vram) */ - drm_mm_init(&dev_priv->vram, 0, prealloc_size); + /* Basic memrange allocator for stolen space (aka mm.vram) */ + drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); /* We're off and running w/KMS */ @@ -2107,16 +1941,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } - ret = i915_probe_agp(dev, &agp_size, &prealloc_size); - if (ret) + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; goto out_iomapfree; - - if (prealloc_size > intel_max_stolen) { - DRM_INFO("detected %dM stolen memory, trimming to %dM\n", - prealloc_size >> 20, intel_max_stolen >> 20); - prealloc_size = intel_max_stolen; } + prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + dev_priv->wq = create_singlethread_workqueue("i915"); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); @@ -2301,7 +2135,7 @@ int i915_driver_unload(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); - drm_mm_takedown(&dev_priv->vram); + drm_mm_takedown(&dev_priv->mm.vram); intel_cleanup_overlay(dev); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d825ef207b2c..c8b22005ec18 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -305,8 +305,6 @@ typedef struct drm_i915_private { uint32_t last_instdone; uint32_t last_instdone1; - struct drm_mm vram; - unsigned long cfb_size; unsigned long cfb_pitch; int cfb_fence; @@ -511,6 +509,11 @@ typedef struct drm_i915_private { u32 saveMCHBAR_RENDER_STANDBY; struct { + /** Bridge to intel-gtt-ko */ + struct intel_gtt *gtt; + /** Memory allocator for GTT stolen memory */ + struct drm_mm vram; + /** Memory allocator for GTT */ struct drm_mm gtt_space; struct io_mapping *gtt_mapping; diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 6769cb704e9b..b3aa7ab72d09 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -14,5 +14,7 @@ struct intel_gtt { unsigned int gtt_mappable_entries; }; +struct intel_gtt *intel_gtt_get(void); + #endif From f4433a8d5d3076775bdd1a996a47db7beb468ac0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Sep 2010 21:44:47 +0200 Subject: [PATCH 104/476] i915: snprintf returns large values snprintf() returns the number of bytes which would have been used if there was enough space. It can be larger than the size of the buffer. 
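A stand-alone illustration of that behaviour (not code from the patch; the buffer is made deliberately too small here to force truncation):

    char buf[8];
    int len = snprintf(buf, sizeof(buf), "wedged : %d\n", 12345);
    /* len is now 15 - the length the untruncated string would have needed -
     * even though only 7 characters plus the NUL terminator fit into buf */
    if (len > sizeof(buf))
        len = sizeof(buf);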
Obviously in this case the buffer is large enough but everyone just copy and pastes this code so it's better to limit it and set a good example. Signed-off-by: Dan Carpenter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2f3e017d24d6..7700ccf25784 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -886,6 +886,9 @@ i915_wedged_read(struct file *filp, "wedged : %d\n", atomic_read(&dev_priv->mm.wedged)); + if (len > sizeof (buf)) + len = sizeof (buf); + return simple_read_from_buffer(ubuf, max, ppos, buf, len); } From 7eaf5547d0460027b15a297bb15d80bdd600cb41 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:41:59 -0700 Subject: [PATCH 105/476] drm/i915: fix eDP detection Panel needs to be powered up. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index eb6e6763ff56..38bf7cd3d480 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -754,13 +754,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } } -static void ironlake_edp_panel_on (struct drm_device *dev) +/* Returns true if the panel was already on when called */ +static bool ironlake_edp_panel_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (I915_READ(PCH_PP_STATUS) & PP_ON) - return; + return true; pp = I915_READ(PCH_PP_CONTROL); @@ -780,6 +781,8 @@ static void ironlake_edp_panel_on (struct drm_device *dev) pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + + return false; } static void ironlake_edp_panel_off (struct drm_device *dev) @@ -860,7 +863,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (IS_eDP(intel_dp)) { + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_on(dev); ironlake_edp_pll_on(encoder); @@ -1365,7 +1368,11 @@ ironlake_dp_detect(struct drm_connector *connector) struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum drm_connector_status status; + bool was_on = false; + /* Panel needs power for AUX to work */ + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + was_on = ironlake_edp_panel_on(connector->dev); status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, @@ -1376,6 +1383,8 @@ ironlake_dp_detect(struct drm_connector *connector) } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); + if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && !was_on) + ironlake_edp_panel_off(connector->dev); return status; } From 6176b8f908a58a7affaacf6f3a90ef14325686f0 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:42:00 -0700 Subject: [PATCH 106/476] drm/i915: use 125MHz reference clock for PCH attached eDP Fix the test so we don't try to use the 450MHz refclk on PCH attached eDP. 
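For reference, the divider is simply the AUX input clock over the roughly 2MHz rate the channel wants to run at, so the intended dividers work out as follows (derived from the clock figures named in this patch, not quoted from the driver):

    400MHz / 2MHz  = 200   (SNB CPU eDP, the value visible in the hunk below)
    450MHz / 2MHz  = 225   (older CPU eDP, the refclk named above)
    125MHz / 2MHz ~=  62   (PCH-attached eDP, hence this fix)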
References: https://bugs.freedesktop.org/show_bug.cgi?id=29141 Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 38bf7cd3d480..8c1da1efc063 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -246,8 +246,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that + * + * Note that PCH attached eDP panels should use a 125MHz input + * clock divider. */ - if (IS_eDP(intel_dp)) { + if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else From b2094bbad48a59f59b115832879121aa210841f0 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:42:01 -0700 Subject: [PATCH 107/476] drm/i915: use VDD AUX for panel power around detection and in prepare Mode setting sequence specifies that we use VDD AUX for configuration and detection, and early in the mode set sequence. Only later (after DP_A has started training) should we actually enable panel power. Signed-off-by: Jesse Barnes [ickle: checkpatch.pl complaining about whitespace] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 35 +++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8c1da1efc063..3a4e2a639872 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -813,6 +813,28 @@ static void ironlake_edp_panel_off (struct drm_device *dev) POSTING_READ(PCH_PP_CONTROL); } +static void ironlake_edp_panel_vdd_on(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + pp = I915_READ(PCH_PP_CONTROL); + pp |= EDP_FORCE_VDD; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); +} + +static void ironlake_edp_panel_vdd_off(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + pp = I915_READ(PCH_PP_CONTROL); + pp &= ~EDP_FORCE_VDD; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); +} + static void ironlake_edp_backlight_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -868,7 +890,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { ironlake_edp_backlight_off(dev); - ironlake_edp_panel_on(dev); + ironlake_edp_panel_vdd_on(dev); ironlake_edp_pll_on(encoder); } if (dp_reg & DP_PORT_EN) @@ -885,8 +907,10 @@ static void intel_dp_commit(struct drm_encoder *encoder) if (!(dp_reg & DP_PORT_EN)) { intel_dp_link_train(intel_dp); } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + ironlake_edp_panel_on(dev); ironlake_edp_backlight_on(dev); + } } static void @@ -1371,11 +1395,10 @@ ironlake_dp_detect(struct drm_connector *connector) struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum drm_connector_status status; - bool was_on = false; /* Panel needs power for AUX to work */ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - was_on = ironlake_edp_panel_on(connector->dev); + ironlake_edp_panel_vdd_on(connector->dev); status = connector_status_disconnected; if 
(intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, @@ -1386,8 +1409,8 @@ ironlake_dp_detect(struct drm_connector *connector) } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && !was_on) - ironlake_edp_panel_off(connector->dev); + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + ironlake_edp_panel_vdd_off(connector->dev); return status; } From 33a34e4e5969c5272cd6cb88f2e01c97218dd80b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:42:02 -0700 Subject: [PATCH 108/476] drm/i915: split DP link training across panel power sequencing Mode set sequence requires that we start training, then enable the panel, then complete training. So split the DP training function into two parts; the first enables the DP port and sets training pattern 1 and the second completes the training. As part of this, remove some redundant function args from the various DP handling functions and use the intel_dp fields everywhere we can. Signed-off-by: Jesse Barnes [ickle: removed first ironlake_edp_backlight_on() on advice of jbarnes] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 121 +++++++++++++++++--------------- 1 file changed, 65 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3a4e2a639872..effbbe0915ec 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -58,6 +58,8 @@ struct intel_dp { struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; bool is_pch_edp; + uint8_t train_set[4]; + uint8_t link_status[DP_LINK_STATUS_SIZE]; }; static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) @@ -65,7 +67,8 @@ static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); } -static void intel_dp_link_train(struct intel_dp *intel_dp); +static void intel_dp_start_link_train(struct intel_dp *intel_dp); +static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void @@ -901,16 +904,16 @@ static void intel_dp_commit(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (!(dp_reg & DP_PORT_EN)) { - intel_dp_link_train(intel_dp); - } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + intel_dp_start_link_train(intel_dp); + + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_panel_on(dev); + + intel_dp_complete_link_train(intel_dp); + + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_backlight_on(dev); - } } static void @@ -932,9 +935,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) ironlake_edp_pll_off(encoder); } else { if (!(dp_reg & DP_PORT_EN)) { + intel_dp_start_link_train(intel_dp); if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_panel_on(dev); - intel_dp_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_backlight_on(dev); } @@ -947,14 +951,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) * link status information */ static bool -intel_dp_get_link_status(struct intel_dp *intel_dp, - uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_get_link_status(struct intel_dp *intel_dp) { int ret; ret = 
intel_dp_aux_native_read(intel_dp, DP_LANE0_1_STATUS, - link_status, DP_LINK_STATUS_SIZE); + intel_dp->link_status, DP_LINK_STATUS_SIZE); if (ret != DP_LINK_STATUS_SIZE) return false; return true; @@ -1029,18 +1032,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_dp *intel_dp, - uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane_count, - uint8_t train_set[4]) +intel_get_adjust_train(struct intel_dp *intel_dp) { uint8_t v = 0; uint8_t p = 0; int lane; - for (lane = 0; lane < lane_count; lane++) { - uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); + for (lane = 0; lane < intel_dp->lane_count; lane++) { + uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); if (this_v > v) v = this_v; @@ -1055,7 +1055,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; for (lane = 0; lane < 4; lane++) - train_set[lane] = v | p; + intel_dp->train_set[lane] = v | p; } static uint32_t @@ -1146,18 +1146,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool -intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +intel_channel_eq_ok(struct intel_dp *intel_dp) { uint8_t lane_align; uint8_t lane_status; int lane; - lane_align = intel_dp_link_status(link_status, + lane_align = intel_dp_link_status(intel_dp->link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; - for (lane = 0; lane < lane_count; lane++) { - lane_status = intel_get_lane_status(link_status, lane); + for (lane = 0; lane < intel_dp->lane_count; lane++) { + lane_status = intel_get_lane_status(intel_dp->link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } @@ -1168,7 +1168,6 @@ static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat, - uint8_t train_set[4], bool first) { struct drm_device *dev = intel_dp->base.enc.dev; @@ -1186,24 +1185,21 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, dp_train_pat); ret = intel_dp_aux_native_write(intel_dp, - DP_TRAINING_LANE0_SET, train_set, 4); + DP_TRAINING_LANE0_SET, intel_dp->train_set, 4); if (ret != 4) return false; return true; } +/* Enable corresponding port and start training pattern 1 */ static void -intel_dp_link_train(struct intel_dp *intel_dp) +intel_dp_start_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint8_t train_set[4]; - uint8_t link_status[DP_LINK_STATUS_SIZE]; int i; uint8_t voltage; bool clock_recovery = false; - bool channel_eq = false; bool first = true; int tries; u32 reg; @@ -1219,18 +1215,18 @@ intel_dp_link_train(struct intel_dp *intel_dp) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; - memset(train_set, 0, 4); + memset(intel_dp->train_set, 0, 4); voltage = 0xff; tries = 0; clock_recovery = false; for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; if (IS_GEN6(dev) && IS_eDP(intel_dp)) { - signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + 
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1240,52 +1236,65 @@ intel_dp_link_train(struct intel_dp *intel_dp) reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_1, train_set, first)) + DP_TRAINING_PATTERN_1, first)) break; first = false; /* Set training pattern 1 */ udelay(100); - if (!intel_dp_get_link_status(intel_dp, link_status)) + if (!intel_dp_get_link_status(intel_dp)) break; - if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { clock_recovery = true; break; } /* Check to see if we've tried the max voltage */ for (i = 0; i < intel_dp->lane_count; i++) - if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; if (i == intel_dp->lane_count) break; /* Check to see if we've tried the same voltage 5 times */ - if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { ++tries; if (tries == 5) break; } else tries = 0; - voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); } + intel_dp->DP = DP; +} + +static void +intel_dp_complete_link_train(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool channel_eq = false; + int tries; + u32 reg; + uint32_t DP = intel_dp->DP; + /* channel equalization */ tries = 0; channel_eq = false; for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; if (IS_GEN6(dev) && IS_eDP(intel_dp)) { - signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1296,15 +1305,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_2, train_set, + DP_TRAINING_PATTERN_2, false)) break; udelay(400); - if (!intel_dp_get_link_status(intel_dp, link_status)) + if (!intel_dp_get_link_status(intel_dp)) break; - if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { + if (intel_channel_eq_ok(intel_dp)) { channel_eq = true; break; } @@ -1313,8 +1322,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) if (tries > 5) break; - /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); + /* Compute new 
intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); ++tries; } @@ -1375,18 +1384,18 @@ intel_dp_link_down(struct intel_dp *intel_dp) static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; - if (!intel_dp->base.enc.crtc) return; - if (!intel_dp_get_link_status(intel_dp, link_status)) { + if (!intel_dp_get_link_status(intel_dp)) { intel_dp_link_down(intel_dp); return; } - if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) - intel_dp_link_train(intel_dp); + if (!intel_channel_eq_ok(intel_dp)) { + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + } } static enum drm_connector_status From 3969c9c927b0bdb1e477a1eda60743143a75e4a5 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:42:03 -0700 Subject: [PATCH 109/476] drm/i915: don't change VDD AUX status in panel power functions Mode set sequence outlines when the AUX VDD bit should be set and cleared, and it's separate from the panel power sequence. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index effbbe0915ec..153a5934b2e1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -783,7 +783,7 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); + pp &= ~(PANEL_UNLOCK_REGS); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -811,7 +811,7 @@ static void ironlake_edp_panel_off (struct drm_device *dev) I915_READ(PCH_PP_STATUS)); /* Make sure VDD is enabled so DP AUX will work */ - pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ + pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } From 3ba5c569c4a99c43bdac9f0c1c65e15a7b3390b9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 25 Aug 2010 13:09:48 -0700 Subject: [PATCH 110/476] drm/i915: make sure VDD AUX power has time to settle When turning on or off the VDD AUX bit, we need to give the panel time to start or stop or AUX transactions may fail. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 153a5934b2e1..bcd81f96fc7f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -825,6 +825,7 @@ static void ironlake_edp_panel_vdd_on(struct drm_device *dev) pp |= EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + msleep(300); } static void ironlake_edp_panel_vdd_off(struct drm_device *dev) @@ -836,6 +837,7 @@ static void ironlake_edp_panel_vdd_off(struct drm_device *dev) pp &= ~EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + msleep(300); } static void ironlake_edp_backlight_on (struct drm_device *dev) From 2c9d97545914cc764786702f361a1f1c9bb8dfa9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 8 Sep 2010 12:42:05 -0700 Subject: [PATCH 111/476] drm/i915: make sure panel is sequenced off when starting a mode set Otherwise we may not be able to train the DP link. 
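Taken together, patches 107 through 111 settle on one ordering for eDP: power sequencing and VDD AUX handling bracket the now-split link training. A compressed, stand-alone sketch of that ordering with stubbed steps (the real driver spreads this across intel_dp_prepare(), intel_dp_commit() and intel_dp_dpms(); this is a reading aid, not the driver code):

#include <stdio.h>

static void step(const char *s) { puts(s); }

int main(void)
{
        /* prepare: panel sequenced off, VDD AUX forced on for configuration */
        step("panel power off");
        step("backlight off");
        step("VDD AUX on (allow ~300ms to settle)");
        step("eDP PLL on");

        /* commit: training split so the panel powers up mid-sequence */
        step("start link training (pattern 1)");
        step("panel power on");
        step("complete link training (pattern 2 / channel equalization)");
        step("backlight on");
        return 0;
}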
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index bcd81f96fc7f..27805a9ca877 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -894,6 +894,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + ironlake_edp_panel_off(dev); ironlake_edp_backlight_off(dev); ironlake_edp_panel_vdd_on(dev); ironlake_edp_pll_on(encoder); From 1af5fa1b7e5ff8332f8a2ee3c5fb44d93b34868d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Sep 2010 21:07:28 +0100 Subject: [PATCH 112/476] drm/i915/dp: Flush the PLL register write before sleeping Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 27805a9ca877..c7aa29bfdea9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -883,6 +883,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) dpa_ctl = I915_READ(DP_A); dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); udelay(200); } From 4ef69c7a64b78d477d1666eba258ca049e8bac91 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Sep 2010 15:14:28 +0100 Subject: [PATCH 113/476] drm/i915: Rename intel_encoder->enc to base for consistency [Patch is slightly larger than is strictly necessary to fixup surrounding checkpatch.pl errors.] Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_irq.c | 14 +-- drivers/gpu/drm/i915/intel_crt.c | 14 +-- drivers/gpu/drm/i915/intel_display.c | 50 ++++----- drivers/gpu/drm/i915/intel_dp.c | 28 ++--- drivers/gpu/drm/i915/intel_drv.h | 4 +- drivers/gpu/drm/i915/intel_dvo.c | 8 +- drivers/gpu/drm/i915/intel_hdmi.c | 8 +- drivers/gpu/drm/i915/intel_lvds.c | 10 +- drivers/gpu/drm/i915/intel_modes.c | 4 +- drivers/gpu/drm/i915/intel_sdvo.c | 148 +++++++++++++-------------- drivers/gpu/drm/i915/intel_tv.c | 15 +-- 11 files changed, 147 insertions(+), 156 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2f7f7cb0bf30..33525c922218 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -260,16 +260,12 @@ static void i915_hotplug_work_func(struct work_struct *work) hotplug_work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->hot_plug) + encoder->hot_plug(encoder); - if (mode_config->num_encoder) { - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->hot_plug) - (*intel_encoder->hot_plug) (intel_encoder); - } - } /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 626279791b89..e3f5e218036d 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -263,7 +263,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) static bool intel_crt_detect_ddc(struct drm_encoder *encoder) { - struct 
intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); /* CRT should always be at 0, but check anyway */ if (intel_encoder->type != INTEL_OUTPUT_ANALOG) @@ -275,7 +275,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) static enum drm_connector_status intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) { - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -405,7 +405,7 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto { struct drm_device *dev = connector->dev; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; @@ -448,7 +448,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) { int ret; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; @@ -533,11 +533,11 @@ void intel_crt_init(struct drm_device *dev) drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + &intel_encoder->base); /* Set up the DDC bus. 
*/ if (HAS_PCH_SPLIT(dev)) @@ -563,7 +563,7 @@ void intel_crt_init(struct drm_device *dev) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4b23646304df..24c7b0e4fdf6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -744,20 +744,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock /** * Returns whether any output on the specified pipe is of the specified type */ -bool intel_pipe_has_type (struct drm_crtc *crtc, int type) +bool intel_pipe_has_type(struct drm_crtc *crtc, int type) { - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *l_entry; + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; - list_for_each_entry(l_entry, &mode_config->encoder_list, head) { - if (l_entry && l_entry->crtc == crtc) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); - if (intel_encoder->type == type) - return true; - } - } - return false; + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->base.crtc == crtc && encoder->type == type) + return true; + + return false; } #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) @@ -2459,7 +2456,7 @@ void intel_encoder_commit (struct drm_encoder *encoder) void intel_encoder_destroy(struct drm_encoder *encoder) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); if (intel_encoder->ddc_bus) intel_i2c_destroy(intel_encoder->ddc_bus); @@ -3540,7 +3537,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; - intel_encoder = enc_to_intel_encoder(encoder); + intel_encoder = to_intel_encoder(encoder); switch (intel_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -4430,7 +4427,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct intel_crtc *intel_crtc; struct drm_crtc *possible_crtc; struct drm_crtc *supported_crtc =NULL; - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -4511,7 +4508,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, int dpms_mode) { - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -5243,24 +5240,23 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) static int intel_encoder_clones(struct drm_device *dev, int type_mask) { + struct intel_encoder *encoder; int index_mask = 0; - struct drm_encoder *encoder; int entry = 0; - list_for_each_entry(encoder, 
&dev->mode_config.encoder_list, head) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - if (type_mask & intel_encoder->clone_mask) + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (type_mask & encoder->clone_mask) index_mask |= (1 << entry); entry++; } + return index_mask; } - static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder *encoder; + struct intel_encoder *encoder; bool dpd_is_edp = false; if (IS_MOBILE(dev) && !IS_I830(dev)) @@ -5349,12 +5345,10 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - encoder->possible_crtcs = intel_encoder->crtc_mask; - encoder->possible_clones = intel_encoder_clones(dev, - intel_encoder->clone_mask); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->base.possible_crtcs = encoder->crtc_mask; + encoder->base.possible_clones = + intel_encoder_clones(dev, encoder->clone_mask); } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c7aa29bfdea9..6cdc53de24ac 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -64,7 +64,7 @@ struct intel_dp { static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); + return container_of(encoder, struct intel_dp, base.base); } static void intel_dp_start_link_train(struct intel_dp *intel_dp); @@ -236,7 +236,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; @@ -704,7 +704,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_crtc *crtc = intel_dp->base.enc.crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_dp->DP = (DP_VOLTAGE_0_4 | @@ -1174,9 +1174,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, uint8_t dp_train_pat, bool first) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int ret; I915_WRITE(intel_dp->output_reg, dp_reg_value); @@ -1200,7 +1200,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, static void intel_dp_start_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; int i; uint8_t voltage; bool clock_recovery = false; @@ -1280,7 +1280,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; bool channel_eq = false; int tries; 
@@ -1345,7 +1345,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) static void intel_dp_link_down(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; @@ -1388,7 +1388,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - if (!intel_dp->base.enc.crtc) + if (!intel_dp->base.base.crtc) return; if (!intel_dp_get_link_status(intel_dp)) { @@ -1438,7 +1438,7 @@ intel_dp_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t temp, bit; enum drm_connector_status status; @@ -1482,7 +1482,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -1670,12 +1670,12 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp->has_audio = false; intel_dp->dpms_mode = DRM_MODE_DPMS_ON; - drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + &intel_encoder->base); drm_sysfs_connector_add(connector); /* Set up the DDC bus. 
*/ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b454d1a4271e..454bcf3933ce 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -137,7 +137,7 @@ struct intel_fbdev { }; struct intel_encoder { - struct drm_encoder enc; + struct drm_encoder base; int type; struct i2c_adapter *i2c_bus; struct i2c_adapter *ddc_bus; @@ -174,7 +174,7 @@ struct intel_crtc { #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) -#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) +#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) struct intel_unpin_work { diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a399f4b2c1c5..7bf7311deb2e 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -88,7 +88,7 @@ struct intel_dvo { static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); + return container_of(encoder, struct intel_dvo, base.base); } static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) @@ -426,13 +426,13 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_encoder_init(dev, &intel_encoder->enc, + drm_encoder_init(dev, &intel_encoder->base, &intel_dvo_enc_funcs, encoder_type); - drm_encoder_helper_add(&intel_encoder->enc, + drm_encoder_helper_add(&intel_encoder->base, &intel_dvo_helper_funcs); drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + &intel_encoder->base); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. 
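The enc -> base rename above leans entirely on container_of() to get from the embedded drm_encoder back to the wrapping structure, which is why expressions like base.base show up in enc_to_intel_dp() and friends. A self-contained illustration of that embedding pattern, using made-up stand-in types rather than the driver's (assumes nothing beyond standard C):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int id; };                   /* stands in for drm_encoder   */
struct mid_encoder  { struct base_encoder base; }; /* stands in for intel_encoder */
struct dp_encoder   { struct mid_encoder base; };  /* stands in for intel_dp      */

int main(void)
{
        struct dp_encoder dp = { .base.base.id = 42 };
        struct base_encoder *enc = &dp.base.base;   /* what the core hands around */

        /* Recover the outer object from the innermost embedded member. */
        struct dp_encoder *back = container_of(enc, struct dp_encoder, base.base);
        printf("id=%d, recovered %s object\n",
               back->base.base.id, back == &dp ? "the same" : "a different");
        return 0;
}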
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ccd4c97e6524..405afd75241b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -45,7 +45,7 @@ struct intel_hdmi { static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); + return container_of(encoder, struct intel_hdmi, base.base); } static void intel_hdmi_mode_set(struct drm_encoder *encoder, @@ -266,12 +266,12 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) intel_hdmi->sdvox_reg = sdvox_reg; - drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + &intel_encoder->base); drm_sysfs_connector_add(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ef6455104ff1..ed1c87636814 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -50,7 +50,7 @@ struct intel_lvds { static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); + return container_of(encoder, struct intel_lvds, base.base); } static void intel_lvds_lock_panel(struct drm_device *dev, bool lock) @@ -437,7 +437,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; @@ -839,15 +839,15 @@ void intel_lvds_init(struct drm_device *dev) } intel_encoder = &intel_lvds->base; - encoder = &intel_encoder->enc; + encoder = &intel_encoder->base; connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->base); intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 4b1fd3d9c73c..1138aa98573d 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) } }; - intel_i2c_quirk_set(intel_encoder->enc.dev, true); + intel_i2c_quirk_set(intel_encoder->base.dev, true); ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); - intel_i2c_quirk_set(intel_encoder->enc.dev, false); + intel_i2c_quirk_set(intel_encoder->base.dev, false); if (ret == 2) return true; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 1c1aeea81e56..3d8f4f4cf326 100644 --- 
a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -186,7 +186,7 @@ struct intel_sdvo_connector { static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); + return container_of(encoder, struct intel_sdvo, base.base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) @@ -211,7 +211,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; @@ -2077,7 +2077,7 @@ intel_sdvo_connector_init(struct drm_encoder *encoder, static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; + struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; @@ -2120,36 +2120,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; - connector = &intel_connector->base; - encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; - intel_sdvo_connector->output_flag = type; + intel_sdvo->controlled_output |= type; + intel_sdvo_connector->output_flag = type; - intel_sdvo->is_tv = true; - intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; + intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(encoder, connector); - if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) + if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; - if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; - return true; + return true; err: intel_sdvo_destroy_enhance_property(connector); @@ -2160,43 +2160,10 @@ err: static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; - - intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); - if (!intel_sdvo_connector) - return false; - 
- intel_connector = &intel_sdvo_connector->base; - connector = &intel_connector->base; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - encoder->encoder_type = DRM_MODE_ENCODER_DAC; - connector->connector_type = DRM_MODE_CONNECTOR_VGA; - - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } - - intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT)); - - intel_sdvo_connector_init(encoder, connector); - return true; -} - -static bool -intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) -{ - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) @@ -2204,22 +2171,55 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - encoder->encoder_type = DRM_MODE_ENCODER_LVDS; - connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; + } else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; + } - intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); + + intel_sdvo_connector_init(encoder, connector); + return true; +} + +static bool +intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; + } else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; + } + + intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); - if (!intel_sdvo_create_enhance_property(intel_sdvo, 
intel_sdvo_connector)) + intel_sdvo_connector_init(encoder, connector); + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; @@ -2291,7 +2291,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; uint32_t format_map, i; @@ -2357,7 +2357,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; @@ -2486,7 +2486,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; @@ -2593,8 +2593,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; /* encoder type will be decided later */ - drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); - drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); + drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) @@ -2637,7 +2637,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) return true; err_enc: - drm_encoder_cleanup(&intel_encoder->enc); + drm_encoder_cleanup(&intel_encoder->base); err_i2c: if (intel_sdvo->analog_ddc_bus != NULL) intel_i2c_destroy(intel_sdvo->analog_ddc_bus); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d4066729f27b..12f15cb31dbb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -900,7 +900,7 @@ static const struct tv_mode tv_modes[] = { static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base); + return container_of(encoder, struct intel_tv, base.base); } static void @@ -1230,7 +1230,7 @@ static const struct drm_display_mode reported_modes[] = { static int intel_tv_detect_type (struct intel_tv *intel_tv) { - struct drm_encoder *encoder = &intel_tv->base.enc; + struct drm_encoder *encoder = &intel_tv->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -1656,15 +1656,16 @@ intel_tv_init(struct drm_device *dev) drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); - drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->base); intel_encoder->type = INTEL_OUTPUT_TVOUT; 
intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); - intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); - intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); + intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); + intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ @@ -1675,7 +1676,7 @@ intel_tv_init(struct drm_device *dev) intel_tv->tv_format = tv_modes[initial_mode].name; - drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; From f875c15a4fbf37534dda30771d8bde8604fbbf09 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Sep 2010 15:44:14 +0100 Subject: [PATCH 114/476] drm/i915: Use the direct mapping of pipe->crtc Why iterate all the crtcs to find the pipe, when we already know which crtc is attached to which pipe? Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/intel_display.c | 12 ------------ drivers/gpu/drm/i915/intel_drv.h | 8 +++++++- drivers/gpu/drm/i915/intel_dvo.c | 2 +- drivers/gpu/drm/i915/intel_lvds.c | 2 +- 4 files changed, 9 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 24c7b0e4fdf6..7e67bf534f89 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5226,18 +5226,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return 0; } -struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) -{ - struct drm_crtc *crtc = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->pipe == pipe) - break; - } - return crtc; -} - static int intel_encoder_clones(struct drm_device *dev, int type_mask) { struct intel_encoder *encoder; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 454bcf3933ce..ba94944e4eb1 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -177,6 +177,13 @@ struct intel_crtc { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +static inline struct drm_crtc * +intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->pipe_to_crtc_mapping[pipe]; +} + struct intel_unpin_work { struct work_struct work; struct drm_device *dev; @@ -235,7 +242,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); -extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct drm_display_mode *mode, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7bf7311deb2e..b15c9da215d6 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -322,7 +322,7 @@ 
intel_dvo_get_current_mode(struct drm_connector *connector) struct drm_crtc *crtc; int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; - crtc = intel_get_crtc_from_pipe(dev, pipe); + crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc) { mode = intel_crtc_mode_get(dev, crtc); if (mode) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ed1c87636814..987973f4ff7d 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -932,7 +932,7 @@ void intel_lvds_init(struct drm_device *dev) lvds = I915_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; - crtc = intel_get_crtc_from_pipe(dev, pipe); + crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); From df0e924883d029a8651a2a0c7b8da67a07611ed2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Sep 2010 16:20:55 +0100 Subject: [PATCH 115/476] drm/i915: Make the connector->encoder relationship explicit Currently we have a exact mapping of a connector onto an encoder for its whole lifetime. Make this an explicit property of the structure and so simplify the code. Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/intel_crt.c | 25 ++++----- drivers/gpu/drm/i915/intel_display.c | 27 +++------ drivers/gpu/drm/i915/intel_dp.c | 23 ++++---- drivers/gpu/drm/i915/intel_drv.h | 10 +++- drivers/gpu/drm/i915/intel_dvo.c | 24 ++++---- drivers/gpu/drm/i915/intel_hdmi.c | 17 +++--- drivers/gpu/drm/i915/intel_lvds.c | 11 ++-- drivers/gpu/drm/i915/intel_sdvo.c | 84 +++++++++++++++------------- drivers/gpu/drm/i915/intel_tv.c | 34 +++++------ 9 files changed, 129 insertions(+), 126 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e3f5e218036d..b39183bcc9fa 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -404,8 +404,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct intel_encoder *encoder = intel_attached_encoder(connector); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; @@ -417,18 +416,18 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto return connector_status_disconnected; } - if (intel_crt_detect_ddc(encoder)) + if (intel_crt_detect_ddc(&encoder->base)) return connector_status_connected; /* for pre-945g platforms use load detect */ - if (encoder->crtc && encoder->crtc->enabled) { - status = intel_crt_load_detect(encoder->crtc, intel_encoder); + if (encoder->base.crtc && encoder->base.crtc->enabled) { + status = intel_crt_load_detect(encoder->base.crtc, encoder); } else { - crtc = intel_get_load_detect_pipe(intel_encoder, connector, + crtc = intel_get_load_detect_pipe(encoder, connector, NULL, &dpms_mode); if (crtc) { - status = intel_crt_load_detect(crtc, intel_encoder); - intel_release_load_detect_pipe(intel_encoder, + status = intel_crt_load_detect(crtc, encoder); + intel_release_load_detect_pipe(encoder, connector, dpms_mode); } else status = connector_status_unknown; @@ -447,13 +446,12 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { int 
ret; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct intel_encoder *encoder = intel_attached_encoder(connector); struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; - ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + ret = intel_ddc_get_modes(connector, encoder->ddc_bus); if (ret || !IS_G4X(dev)) goto end; @@ -504,7 +502,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { @@ -536,8 +534,7 @@ void intel_crt_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); /* Set up the DDC bus. */ if (HAS_PCH_SPLIT(dev)) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7e67bf534f89..a9df2787d752 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6120,26 +6120,17 @@ void intel_modeset_cleanup(struct drm_device *dev) /* * Return which encoder is currently attached for connector. */ -struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) +struct drm_encoder *intel_best_encoder(struct drm_connector *connector) { - struct drm_mode_object *obj; - struct drm_encoder *encoder; - int i; + return &intel_attached_encoder(connector)->base; +} - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) - continue; - - encoder = obj_to_encoder(obj); - return encoder; - } - return NULL; +void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + connector->encoder = encoder; + drm_mode_connector_attach_encoder(&connector->base, + &encoder->base); } /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6cdc53de24ac..b9efeaf5d5b4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -67,6 +67,12 @@ static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) return container_of(encoder, struct intel_dp, base.base); } +static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_dp, base); +} + static void intel_dp_start_link_train(struct intel_dp *intel_dp); static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); @@ -148,8 +154,7 @@ static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); @@ -1405,8 +1410,7 @@ 
intel_dp_check_link_status(struct intel_dp *intel_dp) static enum drm_connector_status ironlake_dp_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); enum drm_connector_status status; /* Panel needs power for AUX to work */ @@ -1436,8 +1440,7 @@ ironlake_dp_detect(struct drm_connector *connector) static enum drm_connector_status intel_dp_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t temp, bit; @@ -1480,8 +1483,7 @@ intel_dp_detect(struct drm_connector *connector) static int intel_dp_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -1554,7 +1556,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -1674,8 +1676,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); /* Set up the DDC bus. 
*/ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ba94944e4eb1..1ada684ea569 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -150,6 +150,7 @@ struct intel_encoder { struct intel_connector { struct drm_connector base; + struct intel_encoder *encoder; }; struct intel_crtc { @@ -234,7 +235,14 @@ extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); extern void intel_encoder_destroy(struct drm_encoder *encoder); -extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); +static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) +{ + return to_intel_connector(connector)->encoder; +} + +extern void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder); +extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index b15c9da215d6..df42a9c9afc1 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -91,6 +91,12 @@ static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) return container_of(encoder, struct intel_dvo, base.base); } +static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_dvo, base); +} + static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; @@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -223,16 +228,13 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, */ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); - + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } static int intel_dvo_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip @@ -280,7 +282,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) @@ -310,8 +312,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = 
dev->dev_private; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); struct drm_display_mode *mode = NULL; @@ -431,8 +432,7 @@ void intel_dvo_init(struct drm_device *dev) drm_encoder_helper_add(&intel_encoder->base, &intel_dvo_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 405afd75241b..bba0aba15a96 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -48,6 +48,12 @@ static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) return container_of(encoder, struct intel_hdmi, base.base); } +static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_hdmi, base); +} + static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -141,8 +147,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; @@ -163,8 +168,7 @@ intel_hdmi_detect(struct drm_connector *connector) static int intel_hdmi_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. 
@@ -198,7 +202,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { @@ -270,8 +274,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 987973f4ff7d..93a711d9dcf5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -436,14 +436,11 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect static int intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; if (dev_priv->lvds_edid_good) { - ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - + struct intel_encoder *encoder = intel_attached_encoder(connector); + int ret = intel_ddc_get_modes(connector, encoder->ddc_bus); if (ret) return ret; } @@ -596,7 +593,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { @@ -847,7 +844,7 @@ void intel_lvds_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 3d8f4f4cf326..96952d20cd21 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -189,6 +189,12 @@ static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) return container_of(encoder, struct intel_sdvo, base.base); } +static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_sdvo, base); +} + static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); @@ -1239,8 +1245,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo 
*intel_sdvo = intel_attached_sdvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1372,18 +1377,22 @@ static struct drm_connector * intel_find_analog_connector(struct drm_device *dev) { struct drm_connector *connector; - struct drm_encoder *encoder; - struct intel_sdvo *intel_sdvo; + struct intel_sdvo *encoder; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_sdvo = enc_to_intel_sdvo(encoder); - if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (encoder == intel_attached_encoder(connector)) + list_for_each_entry(encoder, + &dev->mode_config.encoder_list, + base.base.head) { + if (encoder->base.type == INTEL_OUTPUT_ANALOG) { + list_for_each_entry(connector, + &dev->mode_config.connector_list, + head) { + if (&encoder->base == + intel_attached_encoder(connector)) return connector; } } } + return NULL; } @@ -1406,8 +1415,7 @@ intel_analog_is_connected(struct drm_device *dev) enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; @@ -1468,8 +1476,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { uint16_t response; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; @@ -1516,8 +1523,7 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); int num_modes; /* set the bus switch and get the modes */ @@ -1605,8 +1611,7 @@ struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; @@ -1640,8 +1645,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_display_mode *newmode; @@ -1757,8 +1761,7 @@ intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = 
enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); uint16_t temp_value; uint8_t cmd; @@ -1861,9 +1864,8 @@ set_value: done: - if (encoder->crtc) { - struct drm_crtc *crtc = encoder->crtc; - + if (intel_sdvo->base.base.crtc) { + struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -1891,7 +1893,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) @@ -2058,20 +2060,23 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) } static void -intel_sdvo_connector_init(struct drm_encoder *encoder, - struct drm_connector *connector) +intel_sdvo_connector_init(struct intel_sdvo_connector *connector, + struct intel_sdvo *encoder) { - drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, - connector->connector_type); + drm_connector_init(encoder->base.base.dev, + &connector->base.base, + &intel_sdvo_connector_funcs, + connector->base.base.connector_type); - drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); + drm_connector_helper_add(&connector->base.base, + &intel_sdvo_connector_helper_funcs); - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->base.base.interlace_allowed = 0; + connector->base.base.doublescan_allowed = 0; + connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; - drm_mode_connector_attach_encoder(connector, encoder); - drm_sysfs_connector_add(connector); + intel_connector_attach_encoder(&connector->base, &encoder->base); + drm_sysfs_connector_add(&connector->base.base); } static bool @@ -2112,7 +2117,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); return true; } @@ -2141,7 +2146,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->base.needs_tv_clock = true; intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; @@ -2186,7 +2191,8 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, + intel_sdvo); return true; } @@ -2218,7 +2224,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; diff --git a/drivers/gpu/drm/i915/intel_tv.c 
b/drivers/gpu/drm/i915/intel_tv.c index 12f15cb31dbb..267da3289263 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -903,6 +903,13 @@ static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) return container_of(encoder, struct intel_tv, base.base); } +static struct intel_tv *intel_attached_tv(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_tv, + base); +} + static void intel_tv_dpms(struct drm_encoder *encoder, int mode) { @@ -945,8 +952,7 @@ static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); /* Ensure TV refresh is close to desired refresh */ @@ -1306,8 +1312,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv) */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int i; @@ -1339,14 +1344,13 @@ static enum drm_connector_status intel_tv_detect(struct drm_connector *connector) { struct drm_display_mode mode; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); int type; mode = reported_modes[0]; drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); - if (encoder->crtc && encoder->crtc->enabled) { + if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { type = intel_tv_detect_type(intel_tv); } else { struct drm_crtc *crtc; @@ -1391,8 +1395,7 @@ static void intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) @@ -1417,8 +1420,7 @@ static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int j, count = 0; u64 tmp; @@ -1483,9 +1485,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop uint64_t val) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); - struct drm_crtc *crtc = encoder->crtc; + struct intel_tv *intel_tv = intel_attached_tv(connector); + struct drm_crtc *crtc = intel_tv->base.base.crtc; int ret = 0; bool changed = false; @@ -1550,7 +1551,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = 
intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1659,8 +1660,7 @@ intel_tv_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->base); + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); From 5ba2aaaaa1a282a71c27f385a743f0d86f3484ca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Aug 2010 18:04:08 +0100 Subject: [PATCH 116/476] drm/i915/debug: Include Ironlake in self-refresh status Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 7700ccf25784..3d882b70af16 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -752,15 +752,17 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) + if (IS_IRONLAKE(dev)) + sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; + else if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; - seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : - "disabled"); + seq_printf(m, "self-refresh: %s\n", + sr_enabled ? "enabled" : "disabled"); return 0; } From e642abbf303741b245375b2e3f8f00e900d462dc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Sep 2010 12:46:34 +0100 Subject: [PATCH 117/476] drm/i915: Adapt workqueue to new alloc_workqueue interface create_singlethread_workqueue() is being phased out in favour of the new concurrency-managed workqueue infrastructure. Adapt our workqueue constructor to explicitly create a workqueue that only allows the execution of a single task at any time. All the tasks are expected to require the dev->struct_mutex, so they would block concurrency of other tasks if we allow more than a single i915 task to be run at once. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 428c75b466aa..7c7d1bc9d1be 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1951,7 +1951,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; - dev_priv->wq = create_singlethread_workqueue("i915"); + /* The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and hangcheck. 
+ * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time: max_active = 1 and NON_REENTRANT. + */ + dev_priv->wq = alloc_workqueue("i915", + WQ_UNBOUND | WQ_NON_REENTRANT, + 1); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; From 6be4a6078e41a8ec511dad35d1377bc5338f97be Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:26:01 -0700 Subject: [PATCH 118/476] drm/i915: split Ironlake CRTC enable/disable code This way we can also use it in CRTC prepare/commit. Also makes it easier to split out FDI and other code. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 673 ++++++++++++++------------- 1 file changed, 351 insertions(+), 322 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a9df2787d752..56ca589a83f5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1848,7 +1848,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) +static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1881,6 +1881,354 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) temp = I915_READ(pipeconf_reg); pipe_bpc = temp & PIPE_BPC_MASK; + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + if ((temp & LVDS_PORT_EN) == 0) { + I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); + POSTING_READ(PCH_LVDS); + } + } + + if (!HAS_eDP) { + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + /* + * make the BPC in FDI Rx be consistent with that in + * pipeconf reg. + */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + I915_READ(fdi_rx_reg); + udelay(200); + + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for Ironlake */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } + } + + /* Enable panel fitting for LVDS */ + if (dev_priv->pch_pf_size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) + || HAS_eDP || intel_pch_has_edp(crtc))) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, + PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, + dev_priv->pch_pf_pos); + I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, + dev_priv->pch_pf_size); + } + + /* Enable CPU pipe */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) == 0) { + I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); + I915_READ(pipeconf_reg); + udelay(100); + } + + /* configure and enable CPU plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + } + + if (!HAS_eDP) { + /* For PCH output, training FDI link */ + if (IS_GEN6(dev)) + gen6_fdi_link_train(crtc); + else + ironlake_fdi_link_train(crtc); + + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + } + udelay(200); + + if (HAS_PCH_CPT(dev)) { + /* Be sure PCH DPLL SEL is set */ + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0 && + (temp & TRANSA_DPLL_ENABLE) == 0) + temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + else if (trans_dpll_sel == 1 && + (temp & TRANSB_DPLL_ENABLE) == 0) + temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); + } + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); + I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); + I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); + + I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + + /* enable normal train */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); + + /* wait one idle pattern time */ + udelay(100); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; + int reg; + + reg = I915_READ(trans_dp_ctl); + reg &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK); + reg |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + switch (intel_trans_dp_port_sel(crtc)) { + case PCH_DP_B: + reg |= TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + reg |= TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + reg |= TRANS_DP_PORT_SEL_D; + break; + default: + DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); + reg |= TRANS_DP_PORT_SEL_B; + break; + } + + I915_WRITE(trans_dp_ctl, reg); + POSTING_READ(trans_dp_ctl); + } + + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. 
+ */ + temp &= ~PIPE_BPC_MASK; + temp |= pipe_bpc; + I915_WRITE(transconf_reg, temp | TRANS_ENABLE); + I915_READ(transconf_reg); + + if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("failed to enable transcoder\n"); + } + + intel_crtc_load_lut(crtc); + + intel_update_fbc(crtc, &crtc->mode); +} + +static void ironlake_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; + int trans_dpll_sel = (pipe == 0) ? 0 : 1; + u32 temp; + u32 pipe_bpc; + + temp = I915_READ(pipeconf_reg); + pipe_bpc = temp & PIPE_BPC_MASK; + + drm_vblank_off(dev, pipe); + /* Disable display plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) != 0) { + I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + I915_READ(dspbase_reg); + } + + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + + /* disable cpu pipe, disable after all planes disabled */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) != 0) { + I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); + + /* wait for cpu pipe off, pipe state */ + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50)) + DRM_ERROR("failed to turn off cpu pipe\n"); + } else + DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + + udelay(100); + + /* Disable PF */ + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); + I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); + + /* disable CPU FDI tx and PCH FDI rx */ + temp = I915_READ(fdi_tx_reg); + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + /* BPC in FDI rx is consistent with that in pipeconf */ + temp &= ~(0x07 << 16); + temp |= (pipe_bpc << 11); + I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); + + udelay(100); + + /* still set train pattern 1 */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + POSTING_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + I915_WRITE(fdi_rx_reg, temp); + POSTING_READ(fdi_rx_reg); + + udelay(100); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); + I915_READ(PCH_LVDS); + udelay(100); + } + + /* disable PCH transcoder */ + temp = I915_READ(transconf_reg); + if ((temp & TRANS_ENABLE) != 0) { + I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); + + /* wait for PCH transcoder off, transcoder state */ + if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50)) + DRM_ERROR("failed to disable transcoder\n"); + } + + temp = I915_READ(transconf_reg); + /* BPC in transcoder is consistent with that in pipeconf */ + temp &= ~PIPE_BPC_MASK; + temp |= pipe_bpc; + I915_WRITE(transconf_reg, temp); + I915_READ(transconf_reg); + udelay(100); + + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ + int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; + int reg; + + reg = I915_READ(trans_dp_ctl); + reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + I915_WRITE(trans_dp_ctl, reg); + POSTING_READ(trans_dp_ctl); + + /* disable DPLL_SEL */ + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0) + temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); + else + temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); + + } + + /* disable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + + /* Switch from PCDclk to Rawclk */ + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_SEL_PCDCLK; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + + /* Disable CPU FDI TX PLL */ + temp = I915_READ(fdi_tx_reg); + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + + /* Wait for the clocks to turn off. */ + udelay(100); +} + +static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
*/ @@ -1889,331 +2237,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - if ((temp & LVDS_PORT_EN) == 0) { - I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); - POSTING_READ(PCH_LVDS); - } - } - - if (!HAS_eDP) { - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - /* - * make the BPC in FDI Rx be consistent with that in - * pipeconf reg. - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); - temp &= ~(7 << 19); - temp |= (intel_crtc->fdi_lanes - 1) << 19; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Switch from Rawclk to PCDclk */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Enable CPU FDI TX PLL, always on for Ironlake */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - } - } - - /* Enable panel fitting for LVDS */ - if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || HAS_eDP || intel_pch_has_edp(crtc))) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, - PF_ENABLE | PF_FILTER_MED_3x3); - I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, - dev_priv->pch_pf_pos); - I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, - dev_priv->pch_pf_size); - } - - /* Enable CPU pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) { - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); - udelay(100); - } - - /* configure and enable CPU plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - } - - if (!HAS_eDP) { - /* For PCH output, training FDI link */ - if (IS_GEN6(dev)) - gen6_fdi_link_train(crtc); - else - ironlake_fdi_link_train(crtc); - - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } - udelay(200); - - if (HAS_PCH_CPT(dev)) { - /* Be sure PCH DPLL SEL is set */ - temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0 && - (temp & TRANSA_DPLL_ENABLE) == 0) - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); - else if (trans_dpll_sel == 1 && - (temp & TRANSB_DPLL_ENABLE) == 0) - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - } - - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); - - /* enable normal train */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - if (HAS_PCH_CPT(dev)) { - temp 
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); - - /* wait one idle pattern time */ - udelay(100); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); - reg |= (TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING); - - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - switch (intel_trans_dp_port_sel(crtc)) { - case PCH_DP_B: - reg |= TRANS_DP_PORT_SEL_B; - break; - case PCH_DP_C: - reg |= TRANS_DP_PORT_SEL_C; - break; - case PCH_DP_D: - reg |= TRANS_DP_PORT_SEL_D; - break; - default: - DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); - reg |= TRANS_DP_PORT_SEL_B; - break; - } - - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); - } - - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - /* - * make the BPC in transcoder be consistent with - * that in pipeconf reg. - */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); - - if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder\n"); - } - - intel_crtc_load_lut(crtc); - - intel_update_fbc(crtc, &crtc->mode); + ironlake_crtc_enable(crtc); break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); - - drm_vblank_off(dev, pipe); - /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); - } - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - - /* disable cpu pipe, disable after all planes disabled */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - - /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50)) - DRM_ERROR("failed to turn off cpu pipe\n"); - } else - DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); - - udelay(100); - - /* Disable PF */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); - I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); - - /* disable CPU FDI tx and PCH FDI rx */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - /* BPC in FDI rx is consistent with that in pipeconf */ - temp &= ~(0x07 << 16); - temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); - - udelay(100); - - /* still set train pattern 1 */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - POSTING_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - } - I915_WRITE(fdi_rx_reg, temp); - POSTING_READ(fdi_rx_reg); - - udelay(100); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); - I915_READ(PCH_LVDS); - udelay(100); - } - - /* disable PCH transcoder */ - temp = I915_READ(transconf_reg); - if ((temp & TRANS_ENABLE) != 0) { - I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); - - /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50)) - DRM_ERROR("failed to disable transcoder\n"); - } - - temp = I915_READ(transconf_reg); - /* BPC in transcoder is consistent with that in pipeconf */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp); - I915_READ(transconf_reg); - udelay(100); - - if (HAS_PCH_CPT(dev)) { - /* disable TRANS_DP_CTL */ - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); - - /* disable DPLL_SEL */ - temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0) - temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); - else - temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - - } - - /* disable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - - /* Switch from PCDclk to Rawclk */ - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_SEL_PCDCLK; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - - /* Disable CPU FDI TX PLL */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - - /* Wait for the clocks to turn off. */ - udelay(100); + ironlake_crtc_disable(crtc); break; } } From 0b8765c6e7fb6e0aaa9b9081454fb0f202852523 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:31:34 -0700 Subject: [PATCH 119/476] drm/i915: split i9xx CRTC enable/disable code So we can use it for CRTC prepare/commit. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 189 +++++++++++++++------------ 1 file changed, 105 insertions(+), 84 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56ca589a83f5..fecb98c2d8ad 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2262,7 +2262,7 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) */ } -static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) +static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2275,6 +2275,107 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; u32 temp; + /* Enable the DPLL */ + temp = I915_READ(dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(dpll_reg, temp); + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + } + + /* Enable the pipe */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) == 0) + I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); + + /* Enable the plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + } + + intel_crtc_load_lut(crtc); + + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + + /* Give the overlay scaler a chance to enable if it's on this pipe */ + intel_crtc_dpms_overlay(intel_crtc, true); +} + +static void i9xx_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + u32 temp; + + /* Give the overlay scaler a chance to disable if it's on this pipe */ + intel_crtc_dpms_overlay(intel_crtc, false); + drm_vblank_off(dev, pipe); + + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + + /* Disable display plane */ + temp = I915_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) != 0) { + I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + I915_READ(dspbase_reg); + } + + if (!IS_I9XX(dev)) { + /* Wait for vblank for the disable to take effect */ + intel_wait_for_vblank_off(dev, pipe); + } + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipeconf_reg == PIPEACONF && + (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + goto skip_pipe_off; + + /* Next, disable display pipes */ + temp = I915_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) != 0) { + I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); + I915_READ(pipeconf_reg); + } + + /* Wait for vblank for the disable to take effect. 
*/ + intel_wait_for_vblank_off(dev, pipe); + + temp = I915_READ(dpll_reg); + if ((temp & DPLL_VCO_ENABLE) != 0) { + I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); + I915_READ(dpll_reg); + } +skip_pipe_off: + /* Wait for the clocks to turn off. */ + udelay(150); +} + +static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) +{ /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ @@ -2282,90 +2383,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: - /* Enable the DPLL */ - temp = I915_READ(dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(dpll_reg, temp); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - } - - /* Enable the pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); - - /* Enable the plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - } - - intel_crtc_load_lut(crtc); - - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); - - /* Give the overlay scaler a chance to enable if it's on this pipe */ - intel_crtc_dpms_overlay(intel_crtc, true); - break; + i9xx_crtc_enable(crtc); + break; case DRM_MODE_DPMS_OFF: - /* Give the overlay scaler a chance to disable if it's on this pipe */ - intel_crtc_dpms_overlay(intel_crtc, false); - drm_vblank_off(dev, pipe); - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - - /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); - } - - if (!IS_I9XX(dev)) { - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank_off(dev, pipe); - } - - /* Don't disable pipe A or pipe A PLLs if needed */ - if (pipeconf_reg == PIPEACONF && - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - goto skip_pipe_off; - - /* Next, disable display pipes */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); - } - - /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank_off(dev, pipe); - - temp = I915_READ(dpll_reg); - if ((temp & DPLL_VCO_ENABLE) != 0) { - I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - } - skip_pipe_off: - /* Wait for the clocks to turn off. */ - udelay(150); + i9xx_crtc_disable(crtc); break; } } From 4d12fe0b4864682d3562021cde0f32961c655d75 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:46:45 -0700 Subject: [PATCH 120/476] drm/i915: don't unlock panel regs This was just a workaround for some broken Ironlake CRTC code. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b9efeaf5d5b4..103a60b3cad4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -781,7 +781,7 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; + pp |= POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) From 7e7d76c306adb73a41d2678a42a11004df2519b7 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:47:20 -0700 Subject: [PATCH 121/476] drm/i915: use i915 and Ironlake CRTC enable/disable functions in prepare/commit This will allow us to optimize our prepare/commit paths a bit better. Signed-off-by: Jesse Barnes [ickle: minor tweak to handle the cursor across pipe resizing] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 69 ++++++++++++++++++++++++---- 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fecb98c2d8ad..98276b8454fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2478,16 +2478,60 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) } } -static void intel_crtc_prepare (struct drm_crtc *crtc) +/* Prepare for a mode set. + * + * Note we could be a lot smarter here. We need to figure out which outputs + * will be enabled, which disabled (in short, how the config will changes) + * and perform the minimum necessary steps to accomplish that, e.g. updating + * watermarks, FBC configuration, making sure PLLs are programmed correctly, + * panel fitting is in the proper state, etc. 
+ */ +static void i9xx_crtc_prepare(struct drm_crtc *crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->cursor_on = false; + intel_crtc_update_cursor(crtc); + + i9xx_crtc_disable(crtc); + intel_clear_scanline_wait(dev); } -static void intel_crtc_commit (struct drm_crtc *crtc) +static void i9xx_crtc_commit(struct drm_crtc *crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_update_watermarks(dev); + i9xx_crtc_enable(crtc); + + intel_crtc->cursor_on = true; + intel_crtc_update_cursor(crtc); +} + +static void ironlake_crtc_prepare(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->cursor_on = false; + intel_crtc_update_cursor(crtc); + + ironlake_crtc_disable(crtc); + intel_clear_scanline_wait(dev); +} + +static void ironlake_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_update_watermarks(dev); + ironlake_crtc_enable(crtc); + + intel_crtc->cursor_on = true; + intel_crtc_update_cursor(crtc); } void intel_encoder_prepare (struct drm_encoder *encoder) @@ -5184,14 +5228,12 @@ cleanup_work: return ret; } -static const struct drm_crtc_helper_funcs intel_helper_funcs = { +static struct drm_crtc_helper_funcs intel_helper_funcs = { .dpms = intel_crtc_dpms, .mode_fixup = intel_crtc_mode_fixup, .mode_set = intel_crtc_mode_set, .mode_set_base = intel_pipe_set_base, .mode_set_base_atomic = intel_pipe_set_base_atomic, - .prepare = intel_crtc_prepare, - .commit = intel_crtc_commit, .load_lut = intel_crtc_load_lut, }; @@ -5241,6 +5283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = -1; + + if (HAS_PCH_SPLIT(dev)) { + intel_helper_funcs.prepare = ironlake_crtc_prepare; + intel_helper_funcs.commit = ironlake_crtc_commit; + } else { + intel_helper_funcs.prepare = i9xx_crtc_prepare; + intel_helper_funcs.commit = i9xx_crtc_commit; + } + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_crtc->busy = false; From c98e9dcf9023e72837c1c01251f370e2358a0de6 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:57:18 -0700 Subject: [PATCH 122/476] drm/i915: enable PCH PLL, FDI training and transcoder even for eDP eDP panels require these to be set up prior to panel power sequencing, or they'll fail to power on due to an "asset not ready" check. And of course, eDP panels attached to anything other than DP_A need them enabled regardless, since they'll be driven from the CPU through FDI out to the PCH. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 260 +++++++++++++-------------- 1 file changed, 128 insertions(+), 132 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 98276b8454fc..ff549199c700 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1889,34 +1889,32 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) } } - if (!HAS_eDP) { - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - /* - * make the BPC in FDI Rx be consistent with that in - * pipeconf reg. - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); - temp &= ~(7 << 19); - temp |= (intel_crtc->fdi_lanes - 1) << 19; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); - udelay(200); + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + /* + * make the BPC in FDI Rx be consistent with that in + * pipeconf reg. + */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + I915_READ(fdi_rx_reg); + udelay(200); - /* Switch from Rawclk to PCDclk */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); + udelay(200); - /* Enable CPU FDI TX PLL, always on for Ironlake */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - } + /* Enable CPU FDI TX PLL, always on for Ironlake */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); } /* Enable panel fitting for LVDS */ @@ -1951,114 +1949,112 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } - if (!HAS_eDP) { - /* For PCH output, training FDI link */ - if (IS_GEN6(dev)) - gen6_fdi_link_train(crtc); - else - ironlake_fdi_link_train(crtc); + /* For PCH output, training FDI link */ + if (IS_GEN6(dev)) + gen6_fdi_link_train(crtc); + else + ironlake_fdi_link_train(crtc); - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } - udelay(200); - - if (HAS_PCH_CPT(dev)) { - /* Be sure PCH DPLL SEL is set */ - temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0 && - (temp & TRANSA_DPLL_ENABLE) == 0) - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); - else if (trans_dpll_sel == 1 && - (temp & TRANSB_DPLL_ENABLE) == 0) - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - } - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); - - /* enable normal train */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - 
I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); - - /* wait one idle pattern time */ - udelay(100); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); - reg |= (TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING); - - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - switch (intel_trans_dp_port_sel(crtc)) { - case PCH_DP_B: - reg |= TRANS_DP_PORT_SEL_B; - break; - case PCH_DP_C: - reg |= TRANS_DP_PORT_SEL_C; - break; - case PCH_DP_D: - reg |= TRANS_DP_PORT_SEL_D; - break; - default: - DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); - reg |= TRANS_DP_PORT_SEL_B; - break; - } - - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); - } - - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - /* - * make the BPC in transcoder be consistent with - * that in pipeconf reg. - */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); - - if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder\n"); + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); } + udelay(200); + + if (HAS_PCH_CPT(dev)) { + /* Be sure PCH DPLL SEL is set */ + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0 && + (temp & TRANSA_DPLL_ENABLE) == 0) + temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + else if (trans_dpll_sel == 1 && + (temp & TRANSB_DPLL_ENABLE) == 0) + temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); + } + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); + I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); + I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); + + I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + + /* enable normal train */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); + + /* wait one idle pattern time */ + udelay(100); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; + int reg; + + reg = I915_READ(trans_dp_ctl); + reg &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK); + reg |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + switch (intel_trans_dp_port_sel(crtc)) { + case PCH_DP_B: + reg |= TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + reg |= TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + reg |= TRANS_DP_PORT_SEL_D; + break; + default: + DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); + reg |= TRANS_DP_PORT_SEL_B; + break; + } + + I915_WRITE(trans_dp_ctl, reg); + POSTING_READ(trans_dp_ctl); + } + + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. + */ + temp &= ~PIPE_BPC_MASK; + temp |= pipe_bpc; + I915_WRITE(transconf_reg, temp | TRANS_ENABLE); + I915_READ(transconf_reg); + + if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("failed to enable transcoder\n"); intel_crtc_load_lut(crtc); From 0e23b99d2599112a332136728e9250e688a08b0c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 11:10:00 -0700 Subject: [PATCH 123/476] drm/i915: split Ironlake FDI enable function Easier to read, and will pair up with a disable function. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 72 +++++++++++++++++----------- 1 file changed, 45 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ff549199c700..086df969de4c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1848,6 +1848,50 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train done.\n"); } +static void ironlake_fdi_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + u32 temp; + u32 pipe_bpc; + + temp = I915_READ(pipeconf_reg); + pipe_bpc = temp & PIPE_BPC_MASK; + + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + /* + * make the BPC in FDI Rx be consistent with that in + * pipeconf reg. 
+ */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + I915_READ(fdi_rx_reg); + udelay(200); + + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for Ironlake */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -1889,33 +1933,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) } } - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - /* - * make the BPC in FDI Rx be consistent with that in - * pipeconf reg. - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); - temp &= ~(7 << 19); - temp |= (intel_crtc->fdi_lanes - 1) << 19; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Switch from Rawclk to PCDclk */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Enable CPU FDI TX PLL, always on for Ironlake */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - } + ironlake_fdi_enable(crtc); /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && From de9c27bf70964a8b257eaeb8f71f1898e9f4ac7d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 11:22:02 -0700 Subject: [PATCH 124/476] drm/i915: don't write TU size to N1 reg TU size is only part of the M1 and M2 regs, not the N regs. This keeps us from overwriting a reserved field. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 086df969de4c..358c30127f1a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4170,7 +4170,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (HAS_PCH_SPLIT(dev)) { I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); + I915_WRITE(data_n1_reg, m_n.gmch_n); I915_WRITE(link_m1_reg, m_n.link_m); I915_WRITE(link_n1_reg, m_n.link_n); From c64e311e650921fb014af2b3c500180fc65802b9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 11:27:03 -0700 Subject: [PATCH 125/476] drm/i915: set FDI RX TU size to match transmit size This allows FDI error checking to work. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 358c30127f1a..c31a64daf479 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1857,12 +1857,18 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc) int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int data_m1_reg = (pipe == 0) ? 
PIPEA_DATA_M1 : PIPEB_DATA_M1; u32 temp; u32 pipe_bpc; + u32 tx_size; temp = I915_READ(pipeconf_reg); pipe_bpc = temp & PIPE_BPC_MASK; + /* Write the TU size bits so error detection works */ + tx_size = I915_READ(data_m1_reg) & TU_SIZE_MASK; + I915_WRITE(FDI_RXA_TUSIZE1, tx_size); + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ temp = I915_READ(fdi_rx_reg); /* From d5e0d2f51977fe1f7fd6ee5c1a4476b43bad8f92 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 10 Sep 2010 22:33:19 +0100 Subject: [PATCH 126/476] drm/i915: Ensure all PLL registers are flushed before a udelay() Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c31a64daf479..95c841640508 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1641,6 +1641,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) dpa_ctl |= DP_PLL_FREQ_270MHZ; } I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); udelay(500); } @@ -1708,6 +1709,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; I915_WRITE(fdi_rx_reg, temp); + POSTING_READ(fdi_rx_reg); udelay(150); tries = 0; @@ -1788,6 +1790,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(fdi_tx_reg, temp); + POSTING_READ(fdi_tx_reg); udelay(500); temp = I915_READ(fdi_rx_iir_reg); @@ -1823,6 +1826,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp |= FDI_LINK_TRAIN_PATTERN_2; } I915_WRITE(fdi_rx_reg, temp); + POSTING_READ(fdi_rx_reg); udelay(150); for (i = 0; i < 4; i++ ) { @@ -1830,6 +1834,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(fdi_tx_reg, temp); + POSTING_READ(fdi_tx_reg); udelay(500); temp = I915_READ(fdi_rx_iir_reg); From 8c4223bee91b771782f2ec07f2c85d81cdff3ed5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 10 Sep 2010 22:33:42 +0100 Subject: [PATCH 127/476] drm/i915: Only call udelay() when waiting for clocks to stabilise Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 95c841640508..df410e4827e4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1989,8 +1989,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if ((temp & DPLL_VCO_ENABLE) == 0) { I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); I915_READ(pch_dpll_reg); + udelay(200); } - udelay(200); if (HAS_PCH_CPT(dev)) { /* Be sure PCH DPLL SEL is set */ @@ -2136,8 +2136,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) } else DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); - udelay(100); - /* Disable PF */ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); From 021357acc8ea85273a9882b3fe89935629f51b12 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 7 Sep 2010 20:54:59 +0100 Subject: [PATCH 128/476] drm/i915: Use the real FDI frequency for determining b/w Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 17 ++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 3 +++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d0b4b2375d56..fbf58e0f2588 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2398,6 +2398,7 @@ #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 #define FDI_PLL_BIOS_0 0x46000 +#define FDI_PLL_FB_CLOCK_MASK 0xff #define FDI_PLL_BIOS_1 0x46004 #define FDI_PLL_BIOS_2 0x46008 #define DISPLAY_PORT_PLL_BIOS_0 0x4600c diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index df410e4827e4..5279b0c88e9c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -342,6 +342,13 @@ static bool intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); +static inline u32 /* units of 100MHz */ +intel_fdi_link_freq(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; +} + static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, @@ -3767,7 +3774,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, target_clock = mode->clock; else target_clock = adjusted_mode->clock; - link_bw = 270000; + + /* FDI is a binary signal running at ~2.7GHz, encoding + * each output octet as 10 bits. The actual frequency + * is stored as a divider into a 100MHz clock, and the + * mode pixel clock is stored in units of 1KHz. + * Hence the bw of each lane in terms of the mode signal + * is: + */ + link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; } /* determine panel color depth */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1ada684ea569..6c6b897539f8 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -49,6 +49,9 @@ #define wait_for(COND, MS) _wait_for(COND, MS, 1) #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) +#define KHz(x) (1000*x) +#define MHz(x) KHz(1000*x) + /* * Display related stuff */ From ea056c14a269be393468fe3734f6c2319eb23a3f Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 10 Sep 2010 10:02:13 -0700 Subject: [PATCH 129/476] drm/i915: enable thermal reporting for IPS Thermal reporting may not be enabled by default on some machines, so enable the appropriate bits to allow IPS to get the data it needs from the CPU thermal device. 
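A worked example of the per-lane bandwidth computation introduced in the previous patch (the intel_fdi_link_freq() change) may help with the units. The register value 25 is an assumed, illustrative read of the FDI_PLL_BIOS_0 feedback clock field; the macros mirror the KHz()/MHz() helpers added to intel_drv.h.

#include <stdio.h>

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

int main(void)
{
	/* Assume the FDI_PLL_BIOS_0 feedback clock field reads 25, so the
	 * FDI link runs at (25 + 2) * 100 MHz = 2.7 GHz. */
	unsigned int fb_clock_field = 25;
	unsigned int fdi_freq = fb_clock_field + 2;   /* units of 100 MHz */

	/* Each transmitted octet takes 10 bits on the wire, and the mode
	 * pixel clock is kept in 1 kHz units, so the per-lane bandwidth
	 * in those units is: */
	unsigned int link_bw = fdi_freq * MHz(100) / KHz(1) / 10;

	printf("link_bw = %u kHz per lane\n", link_bw);   /* prints 270000 */
	return 0;
}

This reproduces the previously hard-coded link_bw = 270000, but now derived from the BIOS-programmed FDI PLL divider rather than assumed.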
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ drivers/gpu/drm/i915/intel_display.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index db22a23c65a2..27d20177708f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -928,6 +928,8 @@ #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) +#define TSC1 0x11001 +#define TSE (1<<0) #define TR1 0x11006 #define TSFS 0x11020 #define TSFS_SLOPE_MASK 0x0000ff00 @@ -1072,6 +1074,8 @@ #define MEMSTAT_SRC_CTL_STDBY 3 #define RCPREVBSYTUPAVG 0x113b8 #define RCPREVBSYTDNAVG 0x113bc +#define PMMISC 0x11214 +#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ #define SDEW 0x1124c #define CSIEW0 0x11250 #define CSIEW1 0x11254 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2e9191d77127..c6b8292c0708 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5635,6 +5635,10 @@ void ironlake_enable_drps(struct drm_device *dev) u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; + /* Enable temp reporting */ + I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + /* 100ms RC evaluation intervals */ I915_WRITE(RCUPEI, 100000); I915_WRITE(RCDNEI, 100000); From bed4a6734b5f56ffd240fdda755b6eb589d32482 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Sep 2010 10:47:47 +0100 Subject: [PATCH 130/476] drm/i915: Fix updating FBC We need to track different state on each generation in order to detect when we need to refresh the FBC registers. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 + drivers/gpu/drm/i915/i915_drv.h | 3 + drivers/gpu/drm/i915/intel_display.c | 152 +++++++++++++++------------ 3 files changed, 90 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3d882b70af16..fb5c2a621907 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -719,6 +719,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } else { seq_printf(m, "FBC disabled: "); switch (dev_priv->no_fbc_reason) { + case FBC_NO_OUTPUT: + seq_printf(m, "no outputs"); + break; case FBC_STOLEN_TOO_SMALL: seq_printf(m, "not enough stolen memory"); break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c8b22005ec18..b3efb30b2270 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -219,6 +219,7 @@ struct intel_device_info { }; enum no_fbc_reason { + FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ FBC_MODE_TOO_LARGE, /* mode too large for compression */ @@ -307,8 +308,10 @@ typedef struct drm_i915_private { unsigned long cfb_size; unsigned long cfb_pitch; + unsigned long cfb_offset; int cfb_fence; int cfb_plane; + int cfb_y; int irq_enabled; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c6b8292c0708..f48e944423cb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1047,7 +1047,6 @@ void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) DRM_DEBUG_KMS("vblank wait timed out\n"); } -/* Parameters have changed, update FBC 
info */ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; @@ -1059,6 +1058,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) int plane, i; u32 fbc_ctl, fbc_ctl2; + if (fb->pitch == dev_priv->cfb_pitch && + obj_priv->fence_reg == dev_priv->cfb_fence && + intel_crtc->plane == dev_priv->cfb_plane && + I915_READ(FBC_CONTROL) & FBC_CTL_EN) + return; + + i8xx_disable_fbc(dev); + dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; if (fb->pitch < dev_priv->cfb_pitch) @@ -1100,12 +1107,6 @@ void i8xx_disable_fbc(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 fbc_ctl; - if (!I915_HAS_FBC(dev)) - return; - - if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) - return; /* Already off, just return */ - /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); fbc_ctl &= ~FBC_CTL_EN; @@ -1140,9 +1141,23 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) unsigned long stall_watermark = 200; u32 dpfc_ctl; + dpfc_ctl = I915_READ(DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && + dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_y == crtc->y) + return; + + I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); + POSTING_READ(DPFC_CONTROL); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; + dev_priv->cfb_y = crtc->y; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; if (obj_priv->tiling_mode != I915_TILING_NONE) { @@ -1152,7 +1167,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); } - I915_WRITE(DPFC_CONTROL, dpfc_ctl); I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); @@ -1171,10 +1185,12 @@ void g4x_disable_fbc(struct drm_device *dev) /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(DPFC_CONTROL, dpfc_ctl); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); - DRM_DEBUG_KMS("disabled FBC\n"); + DRM_DEBUG_KMS("disabled FBC\n"); + } } static bool g4x_fbc_enabled(struct drm_device *dev) @@ -1197,11 +1213,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) unsigned long stall_watermark = 200; u32 dpfc_ctl; + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && + dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_offset == obj_priv->gtt_offset && + dev_priv->cfb_y == crtc->y) + return; + + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); + POSTING_READ(ILK_DPFC_CONTROL); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; + dev_priv->cfb_offset = obj_priv->gtt_offset; + dev_priv->cfb_y = crtc->y; - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); dpfc_ctl &= DPFC_RESERVED; dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); if (obj_priv->tiling_mode != I915_TILING_NONE) { @@ -1211,15 +1242,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, 
unsigned long interval) I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); } - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); /* enable it... */ - I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | - DPFC_CTL_EN); + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } @@ -1231,10 +1260,12 @@ void ironlake_disable_fbc(struct drm_device *dev) /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - DRM_DEBUG_KMS("disabled FBC\n"); + DRM_DEBUG_KMS("disabled FBC\n"); + } } static bool ironlake_fbc_enabled(struct drm_device *dev) @@ -1276,8 +1307,7 @@ void intel_disable_fbc(struct drm_device *dev) /** * intel_update_fbc - enable/disable FBC as needed - * @crtc: CRTC to point the compressor at - * @mode: mode in use + * @dev: the drm_device * * Set up the framebuffer compression hardware at mode set time. We * enable it if possible: @@ -1294,18 +1324,14 @@ void intel_disable_fbc(struct drm_device *dev) * * We need to enable/disable FBC on a global basis. */ -static void intel_update_fbc(struct drm_crtc *crtc, - struct drm_display_mode *mode) +static void intel_update_fbc(struct drm_device *dev) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_crtc *crtc = NULL, *tmp_crtc; + struct intel_crtc *intel_crtc; + struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj_priv; - struct drm_crtc *tmp_crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane; - int crtcs_enabled = 0; DRM_DEBUG_KMS("\n"); @@ -1315,12 +1341,6 @@ static void intel_update_fbc(struct drm_crtc *crtc, if (!I915_HAS_FBC(dev)) return; - if (!crtc->fb) - return; - - intel_fb = to_intel_framebuffer(fb); - obj_priv = to_intel_bo(intel_fb->obj); - /* * If FBC is already on, we just have to verify that we can * keep it that way... @@ -1331,35 +1351,47 @@ static void intel_update_fbc(struct drm_crtc *crtc, * - going to an unsupported config (interlace, pixel multiply, etc.) 
*/ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled) - crtcs_enabled++; + if (tmp_crtc->enabled) { + if (crtc) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } + crtc = tmp_crtc; + } } - DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); - if (crtcs_enabled > 1) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + + if (!crtc || crtc->fb == NULL) { + DRM_DEBUG_KMS("no output, disabling\n"); + dev_priv->no_fbc_reason = FBC_NO_OUTPUT; goto out_disable; } + + intel_crtc = to_intel_crtc(crtc); + fb = crtc->fb; + intel_fb = to_intel_framebuffer(fb); + obj_priv = to_intel_bo(intel_fb->obj); + if (intel_fb->obj->size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; goto out_disable; } - if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || - (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { + if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || + (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { DRM_DEBUG_KMS("mode incompatible with compression, " "disabling\n"); dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } - if ((mode->hdisplay > 2048) || - (mode->vdisplay > 1536)) { + if ((crtc->mode.hdisplay > 2048) || + (crtc->mode.vdisplay > 1536)) { DRM_DEBUG_KMS("mode too large for compression, disabling\n"); dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; goto out_disable; } - if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { + if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { DRM_DEBUG_KMS("plane not 0, disabling compression\n"); dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; @@ -1374,18 +1406,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, if (in_dbg_master()) goto out_disable; - if (intel_fbc_enabled(dev)) { - /* We can re-enable it in this case, but need to update pitch */ - if ((fb->pitch > dev_priv->cfb_pitch) || - (obj_priv->fence_reg != dev_priv->cfb_fence) || - (plane != dev_priv->cfb_plane)) - intel_disable_fbc(dev); - } - - /* Now try to turn it back on if possible */ - if (!intel_fbc_enabled(dev)) - intel_enable_fbc(crtc, 500); - + intel_enable_fbc(crtc, 500); return; out_disable: @@ -1527,10 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, } POSTING_READ(dspbase); - if (IS_I965G(dev) || plane == 0) - intel_update_fbc(crtc, &crtc->mode); - - intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_update_fbc(dev); intel_increase_pllclock(crtc); return 0; @@ -2093,8 +2111,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) DRM_ERROR("failed to enable transcoder\n"); intel_crtc_load_lut(crtc); - - intel_update_fbc(crtc, &crtc->mode); + intel_update_fbc(dev); } static void ironlake_crtc_disable(struct drm_crtc *crtc) @@ -2336,9 +2353,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) } intel_crtc_load_lut(crtc); - - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); + intel_update_fbc(dev); /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); @@ -2473,9 +2488,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) dev_priv->display.dpms(crtc, mode); - if (mode == DRM_MODE_DPMS_ON) + if (mode == DRM_MODE_DPMS_ON) { intel_crtc_update_cursor(crtc); - else { + } else { /* XXX Note that this is not a complete solution, 
but a hack * to avoid the most frequently hit hang. */ @@ -2483,6 +2498,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) intel_update_watermarks(dev); } + intel_update_fbc(dev); if (!dev->primary->master) return; From 4ed765f966c8279acc6f6bc1a5dcb0424d074b40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Sep 2010 10:46:47 +0100 Subject: [PATCH 131/476] drm/i915: Tidy Ironlake watermark computation Refactor the common code into seperate functions and use the MIN(large, small) buffer calculation for self-refresh watermarks. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 4 +- drivers/gpu/drm/i915/intel_display.c | 212 +++++++++++++-------------- 2 files changed, 100 insertions(+), 116 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 27d20177708f..eb8cb9440449 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2219,8 +2219,8 @@ #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) -#define WM1_LP_FBC_LP1_MASK (0xf<<20) -#define WM1_LP_FBC_LP1_SHIFT 20 +#define WM1_LP_FBC_MASK (0xf<<20) +#define WM1_LP_FBC_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f48e944423cb..3ef6d7ea1e0e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3404,146 +3404,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, #define ILK_LP0_PLANE_LATENCY 700 #define ILK_LP0_CURSOR_LATENCY 1300 -static void ironlake_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size) +static bool ironlake_compute_wm0(struct drm_device *dev, + int pipe, + int *plane_wm, + int *cursor_wm) +{ + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size = 0; + int line_time_us, line_count, entries; + + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc->fb == NULL || !crtc->enabled) + return false; + + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; + entries = DIV_ROUND_UP(entries, + ironlake_display_wm_info.cacheline_size); + *plane_wm = entries + ironlake_display_wm_info.guard_size; + if (*plane_wm > (int)ironlake_display_wm_info.max_wm) + *plane_wm = ironlake_display_wm_info.max_wm; + + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + entries = DIV_ROUND_UP(entries, + ironlake_cursor_wm_info.cacheline_size); + *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; + if (*cursor_wm > ironlake_cursor_wm_info.max_wm) + *cursor_wm = ironlake_cursor_wm_info.max_wm; + + return true; +} + +static void ironlake_update_wm(struct drm_device *dev, + int planea_clock, int planeb_clock, + int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int sr_wm, cursor_wm; - unsigned long line_time_us; - int sr_clock, entries_required; - u32 reg_value; - int line_count; - int 
planea_htotal = 0, planeb_htotal = 0; - struct drm_crtc *crtc; + int plane_wm, cursor_wm, enabled; + int tmp; - /* Need htotal for all active display plane */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { - if (intel_crtc->plane == 0) - planea_htotal = crtc->mode.htotal; - else - planeb_htotal = crtc->mode.htotal; - } + enabled = 0; + if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled++; } - /* Calculate and update the watermark for plane A */ - if (planea_clock) { - entries_required = ((planea_clock / 1000) * pixel_size * - ILK_LP0_PLANE_LATENCY) / 1000; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); - planea_wm = entries_required + - ironlake_display_wm_info.guard_size; - - if (planea_wm > (int)ironlake_display_wm_info.max_wm) - planea_wm = ironlake_display_wm_info.max_wm; - - /* Use the large buffer method to calculate cursor watermark */ - line_time_us = (planea_htotal * 1000) / planea_clock; - - /* Use ns/us then divide to preserve precision */ - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; - - /* calculate the cursor watermark for cursor A */ - entries_required = line_count * 64 * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_wm_info.cacheline_size); - cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; - if (cursora_wm > ironlake_cursor_wm_info.max_wm) - cursora_wm = ironlake_cursor_wm_info.max_wm; - - reg_value = I915_READ(WM0_PIPEA_ILK); - reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | - (cursora_wm & WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEA_ILK, reg_value); - DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " - "cursor: %d\n", planea_wm, cursora_wm); - } - /* Calculate and update the watermark for plane B */ - if (planeb_clock) { - entries_required = ((planeb_clock / 1000) * pixel_size * - ILK_LP0_PLANE_LATENCY) / 1000; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); - planeb_wm = entries_required + - ironlake_display_wm_info.guard_size; - - if (planeb_wm > (int)ironlake_display_wm_info.max_wm) - planeb_wm = ironlake_display_wm_info.max_wm; - - /* Use the large buffer method to calculate cursor watermark */ - line_time_us = (planeb_htotal * 1000) / planeb_clock; - - /* Use ns/us then divide to preserve precision */ - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; - - /* calculate the cursor watermark for cursor B */ - entries_required = line_count * 64 * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_wm_info.cacheline_size); - cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; - if (cursorb_wm > ironlake_cursor_wm_info.max_wm) - cursorb_wm = ironlake_cursor_wm_info.max_wm; - - reg_value = I915_READ(WM0_PIPEB_ILK); - reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | - (cursorb_wm & WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEB_ILK, reg_value); - DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " - "cursor: %d\n", planeb_wm, cursorb_wm); + if (ironlake_compute_wm0(dev, 1, 
&plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled++; } /* * Calculate and update the self-refresh watermark only when one * display plane is used. */ - if (!planea_clock || !planeb_clock) { - + tmp = 0; + if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) { + unsigned long line_time_us; + int small, large, plane_fbc; + int sr_clock, entries; + int line_count, line_size; /* Read the self-refresh latency. The unit is 0.5us */ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_htotal * 1000) / sr_clock); + line_time_us = (sr_htotal * 1000) / sr_clock; /* Use ns/us then divide to preserve precision */ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) / 1000; + line_size = sr_hdisplay * pixel_size; - /* calculate the self-refresh watermark for display plane */ - entries_required = line_count * sr_hdisplay * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_srwm_info.cacheline_size); - sr_wm = entries_required + - ironlake_display_srwm_info.guard_size; + /* Use the minimum of the small and large buffer method for primary */ + small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), + ironlake_display_srwm_info.cacheline_size); + + plane_fbc = entries * 64; + plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); + + plane_wm = entries + ironlake_display_srwm_info.guard_size; + if (plane_wm > (int)ironlake_display_srwm_info.max_wm) + plane_wm = ironlake_display_srwm_info.max_wm; /* calculate the self-refresh watermark for display cursor */ - entries_required = line_count * pixel_size * 64; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_srwm_info.cacheline_size); - cursor_wm = entries_required + - ironlake_cursor_srwm_info.guard_size; + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, + ironlake_cursor_srwm_info.cacheline_size); + + cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; + if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) + cursor_wm = ironlake_cursor_srwm_info.max_wm; /* configure watermark and enable self-refresh */ - reg_value = I915_READ(WM1_LP_ILK); - reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | - WM1_LP_CURSOR_MASK); - reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | - (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; - - I915_WRITE(WM1_LP_ILK, reg_value); - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", sr_wm, cursor_wm); - - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + tmp = (WM1_LP_SR_EN | + (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + (plane_fbc << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," + " cursor %d\n", plane_wm, plane_fbc, cursor_wm); } + I915_WRITE(WM1_LP_ILK, tmp); + /* XXX setup WM2 and WM3 */ } + /** * intel_update_watermarks - update FIFO watermark values based on current modes * From 5eddb70ba2b8cdbbdd563f5cb04e26fdc9b017f7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Sep 2010 13:48:45 +0100 Subject: [PATCH 132/476] drm/i915: Use macros to switch between equivalent pipe registers 
The purpose is to make the code much easier to read and therefore reduce the possibility for bugs. A side effect is that it also makes it much easier for the compiler, reducing the object size by 4k -- from just a few functions! Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 35 +- drivers/gpu/drm/i915/i915_reg.h | 97 ++- drivers/gpu/drm/i915/intel_display.c | 914 +++++++++++++-------------- drivers/gpu/drm/i915/intel_overlay.c | 7 +- drivers/gpu/drm/i915/intel_tv.c | 2 +- 5 files changed, 508 insertions(+), 547 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7e2b476df744..bc8438d6d843 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -191,12 +191,7 @@ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; - - if (I915_READ(pipeconf) & PIPEACONF_ENABLE) - return 1; - - return 0; + return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which @@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long high_frame; unsigned long low_frame; - u32 high1, high2, low, count; - - high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; - low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + u32 high1, high2, low; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " @@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) return 0; } + high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; + low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + /* * High & low register fields aren't synchronized, so make sure * we get a low value that's stable across two reads of the high * register. */ do { - high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> - PIPE_FRAME_HIGH_SHIFT); - low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> - PIPE_FRAME_LOW_SHIFT); - high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> - PIPE_FRAME_HIGH_SHIFT); + high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; + low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; + high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); - count = (high1 << 8) | low; - - return count; + high1 >>= PIPE_FRAME_HIGH_SHIFT; + low >>= PIPE_FRAME_LOW_SHIFT; + return (high1 << 8) | low; } u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) @@ -1207,11 +1199,8 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - u32 pipeconf; - pipeconf = I915_READ(pipeconf_reg); - if (!(pipeconf & PIPEACONF_ENABLE)) + if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index eb8cb9440449..fd229abe0d86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,6 +25,8 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ +#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. 
@@ -605,6 +607,7 @@ #define VGA1_PD_P1_MASK (0x1f << 8) #define DPLL_A 0x06014 #define DPLL_B 0x06018 +#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) @@ -738,10 +741,13 @@ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define DPLL_B_MD 0x06020 /* 965+ only */ +#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD) #define FPA0 0x06040 #define FPA1 0x06044 #define FPB0 0x06048 #define FPB1 0x0604c +#define FP0(pipe) _PIPE(pipe, FPA0, FPB0) +#define FP1(pipe) _PIPE(pipe, FPA1, FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 @@ -1156,6 +1162,15 @@ #define PIPEBSRC 0x6101c #define BCLRPAT_B 0x61020 +#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B) +#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B) +#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B) +#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) +#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) +#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) +#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) +#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) + /* VGA port control */ #define ADPA 0x61100 #define ADPA_DAC_ENABLE (1<<31) @@ -2086,15 +2101,15 @@ #define PIPEADSL 0x70000 #define DSL_LINEMASK 0x00000fff #define PIPEACONF 0x70008 -#define PIPEACONF_ENABLE (1<<31) -#define PIPEACONF_DISABLE 0 -#define PIPEACONF_DOUBLE_WIDE (1<<30) +#define PIPECONF_ENABLE (1<<31) +#define PIPECONF_DISABLE 0 +#define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) -#define PIPEACONF_SINGLE_WIDE 0 -#define PIPEACONF_PIPE_UNLOCKED 0 -#define PIPEACONF_PIPE_LOCKED (1<<25) -#define PIPEACONF_PALETTE 0 -#define PIPEACONF_GAMMA (1<<24) +#define PIPECONF_SINGLE_WIDE 0 +#define PIPECONF_PIPE_UNLOCKED 0 +#define PIPECONF_PIPE_LOCKED (1<<25) +#define PIPECONF_PALETTE 0 +#define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_PROGRESSIVE (0 << 21) #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) @@ -2147,6 +2162,8 @@ #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) +#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) + #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 @@ -2346,6 +2363,14 @@ #define DSPASURF 0x7019C /* 965+ only */ #define DSPATILEOFF 0x701A4 /* 965+ only */ +#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR) +#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR) +#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE) +#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS) +#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE) +#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF) +#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF) + /* VBIOS flags */ #define SWF00 0x71410 #define SWF01 0x71414 @@ -2434,46 +2459,47 @@ #define PIPEA_DATA_M1 0x60030 #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ #define TU_SIZE_MASK 0x7e000000 -#define PIPEA_DATA_M1_OFFSET 0 +#define PIPE_DATA_M1_OFFSET 0 #define PIPEA_DATA_N1 0x60034 -#define PIPEA_DATA_N1_OFFSET 0 +#define PIPE_DATA_N1_OFFSET 0 #define PIPEA_DATA_M2 0x60038 -#define PIPEA_DATA_M2_OFFSET 0 +#define PIPE_DATA_M2_OFFSET 0 #define PIPEA_DATA_N2 0x6003c -#define PIPEA_DATA_N2_OFFSET 0 +#define PIPE_DATA_N2_OFFSET 0 #define PIPEA_LINK_M1 0x60040 -#define PIPEA_LINK_M1_OFFSET 0 +#define PIPE_LINK_M1_OFFSET 0 #define PIPEA_LINK_N1 
0x60044 -#define PIPEA_LINK_N1_OFFSET 0 +#define PIPE_LINK_N1_OFFSET 0 #define PIPEA_LINK_M2 0x60048 -#define PIPEA_LINK_M2_OFFSET 0 +#define PIPE_LINK_M2_OFFSET 0 #define PIPEA_LINK_N2 0x6004c -#define PIPEA_LINK_N2_OFFSET 0 +#define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ #define PIPEB_DATA_M1 0x61030 -#define PIPEB_DATA_M1_OFFSET 0 #define PIPEB_DATA_N1 0x61034 -#define PIPEB_DATA_N1_OFFSET 0 #define PIPEB_DATA_M2 0x61038 -#define PIPEB_DATA_M2_OFFSET 0 #define PIPEB_DATA_N2 0x6103c -#define PIPEB_DATA_N2_OFFSET 0 #define PIPEB_LINK_M1 0x61040 -#define PIPEB_LINK_M1_OFFSET 0 #define PIPEB_LINK_N1 0x61044 -#define PIPEB_LINK_N1_OFFSET 0 #define PIPEB_LINK_M2 0x61048 -#define PIPEB_LINK_M2_OFFSET 0 #define PIPEB_LINK_N2 0x6104c -#define PIPEB_LINK_N2_OFFSET 0 + +#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1) +#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1) +#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2) +#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2) +#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1) +#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1) +#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2) +#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2) /* CPU panel fitter */ #define PFA_CTL_1 0x68080 @@ -2614,11 +2640,14 @@ #define PCH_DPLL_A 0xc6014 #define PCH_DPLL_B 0xc6018 +#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) #define PCH_FPA0 0xc6040 #define PCH_FPA1 0xc6044 #define PCH_FPB0 0xc6048 #define PCH_FPB1 0xc604c +#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0) +#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1) #define PCH_DPLL_TEST 0xc606c @@ -2704,6 +2733,13 @@ #define TRANS_VBLANK_B 0xe1010 #define TRANS_VSYNC_B 0xe1014 +#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B) +#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B) +#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B) +#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B) +#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B) +#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B) + #define TRANSB_DATA_M1 0xe1030 #define TRANSB_DATA_N1 0xe1034 #define TRANSB_DATA_M2 0xe1038 @@ -2715,6 +2751,7 @@ #define TRANSACONF 0xf0008 #define TRANSBCONF 0xf1008 +#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF) #define TRANS_DISABLE (0<<31) #define TRANS_ENABLE (1<<31) #define TRANS_STATE_MASK (1<<30) @@ -2739,6 +2776,7 @@ /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 #define FDI_TXB_CTL 0x61100 +#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL) #define FDI_TX_DISABLE (0<<31) #define FDI_TX_ENABLE (1<<31) #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) @@ -2780,8 +2818,8 @@ /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ #define FDI_RXA_CTL 0xf000c #define FDI_RXB_CTL 0xf100c +#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL) #define FDI_RX_ENABLE (1<<31) -#define FDI_RX_DISABLE (0<<31) /* train, dp width same as FDI_TX */ #define FDI_DP_PORT_WIDTH_X8 (7<<19) #define FDI_8BPC (0<<16) @@ -2796,8 +2834,7 @@ #define FDI_FS_ERR_REPORT_ENABLE (1<<9) #define FDI_FE_ERR_REPORT_ENABLE (1<<8) #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) -#define FDI_SEL_RAWCLK (0<<4) -#define FDI_SEL_PCDCLK (1<<4) +#define FDI_PCDCLK (1<<4) /* CPT */ #define FDI_AUTO_TRAINING (1<<10) #define 
FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) @@ -2812,6 +2849,9 @@ #define FDI_RXA_TUSIZE2 0xf0038 #define FDI_RXB_TUSIZE1 0xf1030 #define FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC) +#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1) +#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2) /* FDI_RX interrupt register format */ #define FDI_RX_INTER_LANE_ALIGN (1<<10) @@ -2830,6 +2870,8 @@ #define FDI_RXA_IMR 0xf0018 #define FDI_RXB_IIR 0xf1014 #define FDI_RXB_IMR 0xf1018 +#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR) +#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR) #define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_2 0xfe004 @@ -2949,6 +2991,7 @@ #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 #define TRANS_DP_CTL_C 0xe2300 +#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) #define TRANS_DP_OUTPUT_ENABLE (1<<31) #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3ef6d7ea1e0e..1e88ebbc1a1e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -959,26 +959,26 @@ static bool intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { - intel_clock_t clock; - if (target < 200000) { - clock.p1 = 2; - clock.p2 = 10; - clock.n = 2; - clock.m1 = 23; - clock.m2 = 8; - } else { - clock.p1 = 1; - clock.p2 = 10; - clock.n = 1; - clock.m1 = 14; - clock.m2 = 2; - } - clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); - clock.p = (clock.p1 * clock.p2); - clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; - clock.vco = 0; - memcpy(best_clock, &clock, sizeof(intel_clock_t)); - return true; + intel_clock_t clock; + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 2; + clock.m1 = 23; + clock.m2 = 8; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 14; + clock.m2 = 2; + } + clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); + clock.p = (clock.p1 * clock.p2); + clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; + clock.vco = 0; + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; } /** @@ -1099,7 +1099,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(FBC_CONTROL, fbc_ctl); DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", - dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); + dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); } void i8xx_disable_fbc(struct drm_device *dev) @@ -1136,8 +1136,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : - DPFC_CTL_PLANEB); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; u32 dpfc_ctl; @@ -1208,8 +1207,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : - DPFC_CTL_PLANEB; + int plane = intel_crtc->plane == 0 ? 
DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; u32 dpfc_ctl; @@ -1374,14 +1372,14 @@ static void intel_update_fbc(struct drm_device *dev) if (intel_fb->obj->size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " - "compression\n"); + "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; goto out_disable; } if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); + "disabling\n"); dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } @@ -1479,12 +1477,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; - int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); - int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); - int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; - int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; u32 dspcntr; + u32 reg; switch (plane) { case 0: @@ -1499,7 +1493,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, obj = intel_fb->obj; obj_priv = to_intel_bo(obj); - dspcntr = I915_READ(dspcntr_reg); + reg = DSPCNTR(plane); + dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->bits_per_pixel) { @@ -1531,22 +1526,21 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - I915_WRITE(dspcntr_reg, dspcntr); + I915_WRITE(reg, dspcntr); Start = obj_priv->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitch); - I915_WRITE(dspstride, fb->pitch); + I915_WRITE(DSPSTRIDE(plane), fb->pitch); if (IS_I965G(dev)) { - I915_WRITE(dspsurf, Start); - I915_WRITE(dsptileoff, (y << 16) | x); - I915_WRITE(dspbase, Offset); - } else { - I915_WRITE(dspbase, Start + Offset); - } - POSTING_READ(dspbase); + I915_WRITE(DSPSURF(plane), Start); + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPADDR(plane), Offset); + } else + I915_WRITE(DSPADDR(plane), Start + Offset); + POSTING_READ(reg); intel_update_fbc(dev); intel_increase_pllclock(crtc); @@ -1634,7 +1628,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) +static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1666,8 +1660,8 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) dpa_ctl |= DP_PLL_FREQ_270MHZ; } I915_WRITE(DP_A, dpa_ctl); - POSTING_READ(DP_A); + POSTING_READ(DP_A); udelay(500); } @@ -1678,85 +1672,84 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; - int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; - u32 temp, tries = 0; + u32 reg, temp, tries; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ - temp = I915_READ(fdi_rx_imr_reg); + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); + I915_WRITE(reg, temp); + I915_READ(reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + I915_WRITE(reg, temp | FDI_TX_ENABLE); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); udelay(150); + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(fdi_rx_iir_reg); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if ((temp & FDI_RX_BIT_LOCK)) { DRM_DEBUG_KMS("FDI train 1 done.\n"); - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); break; } } if (tries == 5) - DRM_DEBUG_KMS("FDI train 1 fail!\n"); + DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_rx_reg, temp); - POSTING_READ(fdi_rx_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); - tries = 0; - + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(fdi_rx_iir_reg); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_SYMBOL_LOCK); + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (tries == 5) - DRM_DEBUG_KMS("FDI train 2 fail!\n"); + DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); } -static int snb_b_fdi_train_param [] = { +static const int const snb_b_fdi_train_param [] = { FDI_LINK_TRAIN_400MV_0DB_SNB_B, FDI_LINK_TRAIN_400MV_6DB_SNB_B, FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, @@ -1770,24 +1763,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; - int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; - u32 temp, i; + u32 reg, temp, i; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ - temp = I915_READ(fdi_rx_imr_reg); + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; @@ -1795,10 +1786,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + I915_WRITE(reg, temp | FDI_TX_ENABLE); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; @@ -1806,33 +1797,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); udelay(150); for (i = 0; i < 4; i++ ) { - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(fdi_tx_reg, temp); - POSTING_READ(fdi_tx_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(500); - temp = I915_READ(fdi_rx_iir_reg); + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } if (i == 4) - DRM_DEBUG_KMS("FDI train 1 fail!\n"); + DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; if (IS_GEN6(dev)) { @@ -1840,9 +1835,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; } - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; @@ -1850,30 +1846,33 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; } - I915_WRITE(fdi_rx_reg, temp); - POSTING_READ(fdi_rx_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); for (i = 0; i < 4; i++ ) { - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(fdi_tx_reg, temp); - POSTING_READ(fdi_tx_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(500); - temp = I915_READ(fdi_rx_iir_reg); + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_SYMBOL_LOCK); + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (i == 4) 
- DRM_DEBUG_KMS("FDI train 2 fail!\n"); + DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done.\n"); } @@ -1884,50 +1883,49 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; - u32 temp; - u32 pipe_bpc; - u32 tx_size; - - temp = I915_READ(pipeconf_reg); - pipe_bpc = temp & PIPE_BPC_MASK; + u32 reg, temp; /* Write the TU size bits so error detection works */ - tx_size = I915_READ(data_m1_reg) & TU_SIZE_MASK; - I915_WRITE(FDI_RXA_TUSIZE1, tx_size); + I915_WRITE(FDI_RX_TUSIZE1(pipe), + I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - /* - * make the BPC in FDI Rx be consistent with that in - * pipeconf reg. - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); - temp &= ~(7 << 19); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~((0x7 << 19) | (0x7 << 16)); temp |= (intel_crtc->fdi_lanes - 1) << 19; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); + + POSTING_READ(reg); udelay(200); /* Switch from Rawclk to PCDclk */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_PCDCLK); + + POSTING_READ(reg); udelay(200); /* Enable CPU FDI TX PLL, always on for Ironlake */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); udelay(100); } } +static void intel_flush_display_plane(struct drm_device *dev, + int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg = DSPADDR(plane); + I915_WRITE(reg, I915_READ(reg)); +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -1935,38 +1933,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; - int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; - int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B; - int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B; - int trans_hsync_reg = (pipe == 0) ? 
TRANS_HSYNC_A : TRANS_HSYNC_B; - int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; - int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; - int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; - int trans_dpll_sel = (pipe == 0) ? 0 : 1; - u32 temp; - u32 pipe_bpc; - - temp = I915_READ(pipeconf_reg); - pipe_bpc = temp & PIPE_BPC_MASK; + u32 reg, temp; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); - if ((temp & LVDS_PORT_EN) == 0) { + if ((temp & LVDS_PORT_EN) == 0) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); - POSTING_READ(PCH_LVDS); - } } ironlake_fdi_enable(crtc); @@ -1988,19 +1960,20 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) } /* Enable CPU pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) { - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if ((temp & PIPECONF_ENABLE) == 0) { + I915_WRITE(reg, temp | PIPECONF_ENABLE); + POSTING_READ(reg); udelay(100); } /* configure and enable CPU plane */ - temp = I915_READ(dspcntr_reg); + reg = DSPCNTR(plane); + temp = I915_READ(reg); if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); } /* For PCH output, training FDI link */ @@ -2010,42 +1983,42 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) ironlake_fdi_link_train(crtc); /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); + reg = PCH_DPLL(pipe); + temp = I915_READ(reg); if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + POSTING_READ(reg); udelay(200); } if (HAS_PCH_CPT(dev)) { /* Be sure PCH DPLL SEL is set */ temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0 && - (temp & TRANSA_DPLL_ENABLE) == 0) + if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); - else if (trans_dpll_sel == 1 && - (temp & TRANSB_DPLL_ENABLE) == 0) + else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); } - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + /* set transcoder timing */ + I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); + I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); + I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); + + I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); + I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); + I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); /* enable normal train */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = 
FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_NORMAL_CPT; @@ -2053,61 +2026,57 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE; } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); /* wait one idle pattern time */ + POSTING_READ(reg); udelay(100); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); - reg |= (TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING); + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK); + temp |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { case PCH_DP_B: - reg |= TRANS_DP_PORT_SEL_B; + temp |= TRANS_DP_PORT_SEL_B; break; case PCH_DP_C: - reg |= TRANS_DP_PORT_SEL_C; + temp |= TRANS_DP_PORT_SEL_C; break; case PCH_DP_D: - reg |= TRANS_DP_PORT_SEL_D; + temp |= TRANS_DP_PORT_SEL_D; break; default: DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); - reg |= TRANS_DP_PORT_SEL_B; + temp |= TRANS_DP_PORT_SEL_B; break; } - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); + I915_WRITE(reg, temp); } /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); + reg = TRANSCONF(pipe); + temp = I915_READ(reg); /* * make the BPC in transcoder be consistent with * that in pipeconf reg. */ temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); - - if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100)) + temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; + I915_WRITE(reg, temp | TRANS_ENABLE); + if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) DRM_ERROR("failed to enable transcoder\n"); intel_crtc_load_lut(crtc); @@ -2121,28 +2090,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; - int trans_dpll_sel = (pipe == 0) ? 
0 : 1; - u32 temp; - u32 pipe_bpc; - - temp = I915_READ(pipeconf_reg); - pipe_bpc = temp & PIPE_BPC_MASK; + u32 reg, temp; drm_vblank_off(dev, pipe); + /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if (temp & DISPLAY_PLANE_ENABLE) { + I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); } if (dev_priv->cfb_plane == plane && @@ -2150,42 +2107,43 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) dev_priv->display.disable_fbc(dev); /* disable cpu pipe, disable after all planes disabled */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if (temp & PIPECONF_ENABLE) { + I915_WRITE(reg, temp & ~PIPECONF_ENABLE); /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50)) + if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50)) DRM_ERROR("failed to turn off cpu pipe\n"); - } else - DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + } /* Disable PF */ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); /* disable CPU FDI tx and PCH FDI rx */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); - I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_ENABLE); + POSTING_READ(reg); - temp = I915_READ(fdi_rx_reg); - /* BPC in FDI rx is consistent with that in pipeconf */ - temp &= ~(0x07 << 16); - temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(0x7 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + POSTING_READ(reg); udelay(100); /* still set train pattern 1 */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - POSTING_READ(fdi_tx_reg); + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; @@ -2193,80 +2151,73 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - I915_WRITE(fdi_rx_reg, temp); - POSTING_READ(fdi_rx_reg); + /* BPC in FDI rx is consistent with that in PIPECONF */ + temp &= ~(0x07 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp); + POSTING_READ(reg); udelay(100); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); - I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); - I915_READ(PCH_LVDS); - udelay(100); + if (temp & LVDS_PORT_EN) { + I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); + POSTING_READ(PCH_LVDS); + udelay(100); + } } /* disable PCH transcoder */ - temp = I915_READ(transconf_reg); - if ((temp & TRANS_ENABLE) != 0) { - I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); - + reg = TRANSCONF(plane); + temp = I915_READ(reg); + if (temp & TRANS_ENABLE) { + I915_WRITE(reg, temp 
& ~TRANS_ENABLE); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50)) + if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) DRM_ERROR("failed to disable transcoder\n"); } - temp = I915_READ(transconf_reg); - /* BPC in transcoder is consistent with that in pipeconf */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp); - I915_READ(transconf_reg); - udelay(100); - if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + I915_WRITE(reg, temp); /* disable DPLL_SEL */ temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0) + if (pipe == 0) temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); else temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - } /* disable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); + reg = PCH_DPLL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); /* Switch from PCDclk to Rawclk */ - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_SEL_PCDCLK; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_PCDCLK); /* Disable CPU FDI TX PLL */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); udelay(100); - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); /* Wait for the clocks to turn off. */ + POSTING_READ(reg); udelay(100); } @@ -2316,40 +2267,43 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - u32 temp; + u32 reg, temp; /* Enable the DPLL */ - temp = I915_READ(dpll_reg); + reg = DPLL(pipe); + temp = I915_READ(reg); if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(dpll_reg, temp); - I915_READ(dpll_reg); + I915_WRITE(reg, temp); + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); + + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); + + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + /* Wait for the clocks to stabilize. 
*/ + POSTING_READ(reg); udelay(150); } /* Enable the pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if ((temp & PIPECONF_ENABLE) == 0) + I915_WRITE(reg, temp | PIPECONF_ENABLE); /* Enable the plane */ - temp = I915_READ(dspcntr_reg); + reg = DSPCNTR(plane); + temp = I915_READ(reg); if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); + I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); } intel_crtc_load_lut(crtc); @@ -2366,11 +2320,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - u32 temp; + u32 reg, temp; /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, false); @@ -2381,42 +2331,42 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) dev_priv->display.disable_fbc(dev); /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if (temp & DISPLAY_PLANE_ENABLE) { + I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); - } + intel_flush_display_plane(dev, plane); - if (!IS_I9XX(dev)) { /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank_off(dev, pipe); + if (!IS_I9XX(dev)) + intel_wait_for_vblank_off(dev, pipe); } /* Don't disable pipe A or pipe A PLLs if needed */ - if (pipeconf_reg == PIPEACONF && - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - goto skip_pipe_off; + if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; /* Next, disable display pipes */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if (temp & PIPECONF_ENABLE) { + I915_WRITE(reg, temp & ~PIPECONF_ENABLE); + + /* Wait for vblank for the disable to take effect. */ + POSTING_READ(reg); + intel_wait_for_vblank_off(dev, pipe); } - /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank_off(dev, pipe); + reg = DPLL(pipe); + temp = I915_READ(reg); + if (temp & DPLL_VCO_ENABLE) { + I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); - temp = I915_READ(dpll_reg); - if ((temp & DPLL_VCO_ENABLE) != 0) { - I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(dpll_reg); + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(150); } -skip_pipe_off: - /* Wait for the clocks to turn off. */ - udelay(150); } static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -3030,7 +2980,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? 
"B" : "A", size); return size; } @@ -3047,7 +2997,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? "B" : "A", size); return size; } @@ -3062,8 +3012,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) size >>= 2; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", - size); + plane ? "B" : "A", + size); return size; } @@ -3078,14 +3028,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? "B" : "A", size); return size; } static void pineview_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int unused, - int pixel_size) + int planeb_clock, int sr_hdisplay, int unused, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; const struct cxsr_latency *latency; @@ -3197,13 +3147,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); entries_required = (((sr_latency_ns / line_time_us) + 1000) / 1000) * pixel_size * 64; entries_required = DIV_ROUND_UP(entries_required, - g4x_cursor_wm_info.cacheline_size); + g4x_cursor_wm_info.cacheline_size); cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; if (cursor_sr > g4x_cursor_wm_info.max_wm) @@ -3215,7 +3165,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, } else { /* Turn off self refresh if both pipes are enabled */ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -3253,7 +3203,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); srwm = I965_FIFO_SIZE - sr_entries; @@ -3262,11 +3212,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm &= 0x1ff; sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * 64; + pixel_size * 64; sr_entries = DIV_ROUND_UP(sr_entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - - (sr_entries + i965_cursor_wm_info.guard_size); + (sr_entries + i965_cursor_wm_info.guard_size); if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; @@ -3345,7 +3295,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); srwm = total_size - sr_entries; @@ -3370,7 +3320,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, 
cwm, srwm); + planea_wm, planeb_wm, cwm, srwm); fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); fwater_hi = (cwm & 0x1f); @@ -3489,7 +3439,7 @@ static void ironlake_update_wm(struct drm_device *dev, /* Use ns/us then divide to preserve precision */ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) - / 1000; + / 1000; line_size = sr_hdisplay * pixel_size; /* Use the minimum of the small and large buffer method for primary */ @@ -3559,7 +3509,7 @@ static void ironlake_update_wm(struct drm_device *dev, * * We don't use the sprite, so we can ignore that. And on Crestline we have * to set the non-SR watermarks to 8. - */ + */ static void intel_update_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3579,11 +3529,11 @@ static void intel_update_watermarks(struct drm_device *dev) enabled++; if (intel_crtc->plane == 0) { DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); + intel_crtc->pipe, crtc->mode.clock); planea_clock = crtc->mode.clock; } else { DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); + intel_crtc->pipe, crtc->mode.clock); planeb_clock = crtc->mode.clock; } sr_hdisplay = crtc->mode.hdisplay; @@ -3614,61 +3564,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int fp_reg = (pipe == 0) ? FPA0 : FPB0; - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; - int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; - int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; - int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; + u32 fp_reg, dpll_reg; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; + u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; struct intel_encoder *has_edp_encoder = NULL; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; - int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; - int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; - int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; - int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; - int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int trans_dpll_sel = (pipe == 0) ? 
0 : 1; - int lvds_reg = LVDS; - u32 temp; + u32 reg, temp; int target_clock; drm_vblank_pre_modeset(dev, pipe); - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder; - - if (encoder->crtc != crtc) + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + if (encoder->base.crtc != crtc) continue; - intel_encoder = to_intel_encoder(encoder); - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; - if (intel_encoder->needs_tv_clock) + if (encoder->needs_tv_clock) is_tv = true; break; case INTEL_OUTPUT_DVO: @@ -3684,7 +3608,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, is_dp = true; break; case INTEL_OUTPUT_EDP: - has_edp_encoder = intel_encoder; + has_edp_encoder = encoder; break; } @@ -3694,7 +3618,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", - refclk / 1000); + refclk / 1000); } else if (IS_I9XX(dev)) { refclk = 96000; if (HAS_PCH_SPLIT(dev)) @@ -3702,7 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } else { refclk = 48000; } - /* * Returns a set of divisors for the desired target clock with the given @@ -3722,9 +3645,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, - dev_priv->lvds_downclock, - refclk, - &reduced_clock); + dev_priv->lvds_downclock, + refclk, + &reduced_clock); if (has_reduced_clock && (clock.p != reduced_clock.p)) { /* * If the different P is found, it means that we can't @@ -3733,7 +3656,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * feature. */ DRM_DEBUG_KMS("Different P is found for " - "LVDS clock/downclock\n"); + "LVDS clock/downclock\n"); has_reduced_clock = 0; } } @@ -3741,14 +3664,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, this mirrors vbios setting. 
*/ if (is_sdvo && is_tv) { if (adjusted_mode->clock >= 100000 - && adjusted_mode->clock < 140500) { + && adjusted_mode->clock < 140500) { clock.p1 = 2; clock.p2 = 10; clock.n = 3; clock.m1 = 16; clock.m2 = 8; } else if (adjusted_mode->clock >= 140500 - && adjusted_mode->clock <= 200000) { + && adjusted_mode->clock <= 200000) { clock.p1 = 1; clock.p2 = 10; clock.n = 6; @@ -3785,12 +3708,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* determine panel color depth */ - temp = I915_READ(pipeconf_reg); + temp = I915_READ(PIPECONF(pipe)); temp &= ~PIPE_BPC_MASK; if (is_lvds) { - int lvds_reg = I915_READ(PCH_LVDS); /* the BPC will be 6 if it is 18-bit LVDS panel */ - if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) + if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) temp |= PIPE_8BPC; else temp |= PIPE_6BPC; @@ -3811,8 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } else temp |= PIPE_8BPC; - I915_WRITE(pipeconf_reg, temp); - I915_READ(pipeconf_reg); + I915_WRITE(PIPECONF(pipe), temp); switch (temp & PIPE_BPC_MASK) { case PIPE_8BPC: @@ -3857,33 +3778,27 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Always enable nonspread source */ temp &= ~DREF_NONSPREAD_SOURCE_MASK; temp |= DREF_NONSPREAD_SOURCE_ENABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); - temp &= ~DREF_SSC_SOURCE_MASK; temp |= DREF_SSC_SOURCE_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); + POSTING_READ(PCH_DREF_CONTROL); udelay(200); if (has_edp_encoder) { if (dev_priv->lvds_use_ssc) { temp |= DREF_SSC1_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); + POSTING_READ(PCH_DREF_CONTROL); udelay(200); temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); } else { temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); } + I915_WRITE(PCH_DREF_CONTROL, temp); } } @@ -3899,6 +3814,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } + dpll = 0; if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; @@ -3972,7 +3888,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; /* setup pipeconf */ - pipeconf = I915_READ(pipeconf_reg); + pipeconf = I915_READ(PIPECONF(pipe)); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -3995,16 +3911,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, */ if (mode->clock > dev_priv->display.get_display_clock_speed(dev) * 9 / 10) - pipeconf |= PIPEACONF_DOUBLE_WIDE; + pipeconf |= PIPECONF_DOUBLE_WIDE; else - pipeconf &= ~PIPEACONF_DOUBLE_WIDE; + pipeconf &= ~PIPECONF_DOUBLE_WIDE; } dspcntr |= DISPLAY_PLANE_ENABLE; - pipeconf |= PIPEACONF_ENABLE; + pipeconf |= PIPECONF_ENABLE; dpll |= DPLL_VCO_ENABLE; - /* Disable the panel fitter if it was on our pipe */ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) I915_WRITE(PFIT_CONTROL, 0); @@ -4014,26 +3929,31 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* assign to Ironlake registers */ if (HAS_PCH_SPLIT(dev)) { - fp_reg = pch_fp_reg; - dpll_reg = pch_dpll_reg; + fp_reg = PCH_FP0(pipe); + dpll_reg = PCH_DPLL(pipe); + } else { + fp_reg = FP0(pipe); + dpll_reg = DPLL(pipe); } if (!has_edp_encoder) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); - I915_READ(dpll_reg); + + POSTING_READ(dpll_reg); udelay(150); } /* 
enable transcoder DPLL */ if (HAS_PCH_CPT(dev)) { temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0) - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + if (pipe == 0) + temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; else - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); + + POSTING_READ(PCH_DPLL_SEL); udelay(150); } @@ -4042,33 +3962,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * things on. */ if (is_lvds) { - u32 lvds; - + reg = LVDS; if (HAS_PCH_SPLIT(dev)) - lvds_reg = PCH_LVDS; + reg = PCH_LVDS; - lvds = I915_READ(lvds_reg); - lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + temp = I915_READ(reg); + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (pipe == 1) { if (HAS_PCH_CPT(dev)) - lvds |= PORT_TRANS_B_SEL_CPT; + temp |= PORT_TRANS_B_SEL_CPT; else - lvds |= LVDS_PIPEB_SELECT; + temp |= LVDS_PIPEB_SELECT; } else { if (HAS_PCH_CPT(dev)) - lvds &= ~PORT_TRANS_SEL_MASK; + temp &= ~PORT_TRANS_SEL_MASK; else - lvds &= ~LVDS_PIPEB_SELECT; + temp &= ~LVDS_PIPEB_SELECT; } /* set the corresponsding LVDS_BORDER bit */ - lvds |= dev_priv->lvds_border_bits; + temp |= dev_priv->lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ if (clock.p2 == 7) - lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; else - lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how @@ -4077,12 +3996,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* set the dithering flag on non-PCH LVDS as needed */ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { if (dev_priv->lvds_dither) - lvds |= LVDS_ENABLE_DITHER; + temp |= LVDS_ENABLE_DITHER; else - lvds &= ~LVDS_ENABLE_DITHER; + temp &= ~LVDS_ENABLE_DITHER; } - I915_WRITE(lvds_reg, lvds); - I915_READ(lvds_reg); + I915_WRITE(reg, temp); } /* set the dithering flag and clear for anything other than a panel. */ @@ -4115,32 +4033,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (!has_edp_encoder) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); - I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + POSTING_READ(dpll_reg); udelay(150); if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + temp = 0; if (is_sdvo) { - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); - if (pixel_multiplier > 1) - pixel_multiplier = (pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + temp = intel_mode_get_pixel_multiplier(adjusted_mode); + if (temp > 1) + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; else - pixel_multiplier = 0; - - I915_WRITE(dpll_md_reg, - (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | - pixel_multiplier); - } else - I915_WRITE(dpll_md_reg, 0); + temp = 0; + } + I915_WRITE(DPLL_MD(pipe), temp); } else { /* write it again -- the BIOS does, after all */ I915_WRITE(dpll_reg, dpll); } - I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. 
*/ + POSTING_READ(dpll_reg); udelay(150); } + intel_crtc->lowfreq_avail = false; if (is_lvds && has_reduced_clock && i915_powersave) { I915_WRITE(fp_reg + 4, fp2); intel_crtc->lowfreq_avail = true; @@ -4150,7 +4068,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } else { I915_WRITE(fp_reg + 4, fp); - intel_crtc->lowfreq_avail = false; if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; @@ -4169,58 +4086,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } else pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ - I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | + + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - /* pipesrc and dspsize control the size that is scaled from, which should - * always be the user's requested size. + + /* pipesrc and dspsize control the size that is scaled from, + * which should always be the user's requested size. 
*/ if (!HAS_PCH_SPLIT(dev)) { - I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | - (mode->hdisplay - 1)); - I915_WRITE(dsppos_reg, 0); + I915_WRITE(DSPSIZE(plane), + ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(DSPPOS(plane), 0); } - I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(data_n1_reg, m_n.gmch_n); - I915_WRITE(link_m1_reg, m_n.link_m); - I915_WRITE(link_n1_reg, m_n.link_n); + I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); + I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); + I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); if (has_edp_encoder) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); } else { /* enable FDI RX PLL too */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); + + POSTING_READ(reg); udelay(200); /* enable FDI TX PLL too */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); /* enable FDI RX PCDCLK */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_PCDCLK); + + POSTING_READ(reg); udelay(200); } } - I915_WRITE(pipeconf_reg, pipeconf); - I915_READ(pipeconf_reg); + I915_WRITE(PIPECONF(pipe), pipeconf); + POSTING_READ(PIPECONF(pipe)); intel_wait_for_vblank(dev, pipe); @@ -4230,9 +4161,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); } - I915_WRITE(dspcntr_reg, dspcntr); + I915_WRITE(DSPCNTR(plane), dspcntr); - /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 743ced7c4ae7..9dcddfc8394c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -255,7 +255,7 @@ i830_activate_pipe_a(struct drm_device *dev) return 0; /* most i8xx have pipe a forced on, so don't trust dpms mode */ - if (I915_READ(PIPEACONF) & PIPEACONF_ENABLE) + if (I915_READ(PIPEACONF) & PIPECONF_ENABLE) return 0; crtc_funcs = crtc->base.helper_private; @@ -876,15 +876,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, { drm_i915_private_t *dev_priv = overlay->dev->dev_private; u32 pipeconf; - int pipeconf_reg = (crtc->pipe == 0) ? 
PIPEACONF : PIPEBCONF; if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) return -EINVAL; - pipeconf = I915_READ(pipeconf_reg); + pipeconf = I915_READ(PIPECONF(crtc->pipe)); /* can't use the overlay with double wide pipe */ - if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) + if (!IS_I965G(overlay->dev) && pipeconf & PIPECONF_DOUBLE_WIDE) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 267da3289263..e819cadd5f46 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1168,7 +1168,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (!IS_I9XX(dev)) intel_wait_for_vblank(dev, intel_crtc->pipe); - I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); + I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); /* Wait for vblank for the disable to take effect. */ intel_wait_for_vblank_off(dev, intel_crtc->pipe); From fe255d0028903f1132a3c1214edc91cf95b7cd98 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Sep 2010 21:37:48 +0100 Subject: [PATCH 133/476] drm/i915/dp: Convert a udelay(17000) to a sleep during link-off Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 103a60b3cad4..208a4ec3e432 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1366,14 +1366,13 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); - POSTING_READ(intel_dp->output_reg); } else { DP &= ~DP_LINK_TRAIN_MASK; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); - POSTING_READ(intel_dp->output_reg); } + POSTING_READ(intel_dp->output_reg); - udelay(17000); + msleep(17); if (IS_eDP(intel_dp)) DP |= DP_LINK_TRAIN_OFF; From b222f2673354c65e178cbcba610e7883a05f5bf3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Sep 2010 21:48:25 +0100 Subject: [PATCH 134/476] drm/i915/i2c: The bit-banging interface controls the delay, drop ours Remove our redundant udelay() as the timings are already handled by the i2c-algo-bit controller. 
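For context on why the explicit delays can go: the i2c-algo-bit core itself waits for the configured .udelay between every SCL/SDA transition, so the driver callbacks only need to post the register write. The sketch below is illustrative only and not code from this patch; the my_chan structure, the MY_* register bits and the RISEFALL_US value are assumptions invented for the example.

/*
 * Illustrative sketch only (not code from this patch): a GPIO
 * bit-banged I2C bus that leaves all pacing to i2c-algo-bit via the
 * .udelay field. The my_chan layout, the MY_* register bits and
 * RISEFALL_US are assumptions invented for the example.
 */
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>

#define RISEFALL_US		20		/* assumed worst-case line rise/fall time */
#define MY_SCL_DRIVE_LOW	(1 << 0)	/* hypothetical register bits */
#define MY_SDA_DRIVE_LOW	(1 << 1)
#define MY_SDA_IN		(1 << 3)

struct my_chan {
	void __iomem *reg;		/* hypothetical GPIO control register */
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data algo;
};

static void my_setscl(void *data, int state)
{
	struct my_chan *chan = data;
	u32 val = readl(chan->reg);

	/* open-drain: "high" releases the line, "low" pulls it down */
	val = state ? (val & ~MY_SCL_DRIVE_LOW) : (val | MY_SCL_DRIVE_LOW);
	writel(val, chan->reg);
	readl(chan->reg);		/* post the write; no udelay() here */
}

static void my_setsda(void *data, int state)
{
	struct my_chan *chan = data;
	u32 val = readl(chan->reg);

	val = state ? (val & ~MY_SDA_DRIVE_LOW) : (val | MY_SDA_DRIVE_LOW);
	writel(val, chan->reg);
	readl(chan->reg);
}

static int my_getsda(void *data)
{
	struct my_chan *chan = data;

	return (readl(chan->reg) & MY_SDA_IN) != 0;
}

static int my_register_bus(struct my_chan *chan)
{
	chan->algo.setsda = my_setsda;
	chan->algo.setscl = my_setscl;
	chan->algo.getsda = my_getsda;
	chan->algo.getscl = NULL;		/* no clock stretching in this sketch */
	chan->algo.udelay = RISEFALL_US;	/* the algo waits between transitions */
	chan->algo.timeout = usecs_to_jiffies(2200);
	chan->algo.data = chan;

	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->algo;
	strlcpy(chan->adapter.name, "my-gpio-i2c", sizeof(chan->adapter.name));

	return i2c_bit_add_bus(&chan->adapter);
}

The point mirrors the change above: drop the per-callback udelay() calls and let the single .udelay field enforce the bus timing.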
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_i2c.c | 34 ++++++++++++++------------------ 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c2649c7df14c..de03989d6df3 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -38,16 +38,18 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ if (!IS_PINEVIEW(dev)) return; + + val = I915_READ(DSPCLK_GATE_D); if (enable) - I915_WRITE(DSPCLK_GATE_D, - I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE); + val |= DPCUNIT_CLOCK_GATE_DISABLE; else - I915_WRITE(DSPCLK_GATE_D, - I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE)); + val &= ~DPCUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); } /* @@ -60,20 +62,14 @@ static int get_clock(void *data) { struct intel_i2c_chan *chan = data; struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; - u32 val; - - val = I915_READ(chan->reg); - return ((val & GPIO_CLOCK_VAL_IN) != 0); + return (I915_READ(chan->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_i2c_chan *chan = data; struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; - u32 val; - - val = I915_READ(chan->reg); - return ((val & GPIO_DATA_VAL_IN) != 0); + return (I915_READ(chan->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) @@ -94,7 +90,7 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; I915_WRITE(chan->reg, reserved | clock_bits); - udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ + POSTING_READ(chan->reg); } static void set_data(void *data, int state_high) @@ -116,7 +112,7 @@ static void set_data(void *data, int state_high) GPIO_DATA_VAL_MASK; I915_WRITE(chan->reg, reserved | data_bits); - udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ + POSTING_READ(chan->reg); } /* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C @@ -129,11 +125,10 @@ intel_i2c_reset_gmbus(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_GMBUS0, 0); - } else { + else I915_WRITE(GMBUS0, 0); - } } /** @@ -177,7 +172,7 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, chan->algo.setscl = set_clock; chan->algo.getsda = get_data; chan->algo.getscl = get_clock; - chan->algo.udelay = 20; + chan->algo.udelay = I2C_RISEFALL_TIME; chan->algo.timeout = usecs_to_jiffies(2200); chan->algo.data = chan; @@ -191,9 +186,10 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, /* JJJ: raise SCL and SDA? */ intel_i2c_quirk_set(dev, true); set_data(chan, 1); + udelay(I2C_RISEFALL_TIME); set_clock(chan, 1); + udelay(I2C_RISEFALL_TIME); intel_i2c_quirk_set(dev, false); - udelay(20); return &chan->adapter; From 77d07fd9d73ef28689737c0952dbd5d6a5017743 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 12:42:35 +0100 Subject: [PATCH 135/476] drm/i915/lvds: Remove busy wait for powering down the panel Just assume that it will turn off... 
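What this and the following patch remove is the driver's usual bounded poll of the panel power status; "just assume that it will turn off" means trading that check for a plain register write. As a reminder of what the dropped check did, here is a generic sleep-based poll-with-timeout sketch; poll_until() and its callback are made-up names for illustration, not helpers that exist in the driver.

/*
 * Generic sketch of the poll-with-timeout idiom behind calls such as
 * wait_for(I915_READ(status_reg) & PP_ON, 1000). Hypothetical helper,
 * not part of the driver.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_until(bool (*done)(void *ctx), void *ctx, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (done(ctx))
			return 0;		/* condition reached */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* give up and let the caller log it */
		msleep(1);			/* sleep between reads, don't spin */
	}
}

If the assumption ever breaks on some machine, reinstating a bounded sleep-based poll like this (or the driver's own wait_for()) is the remedy, not a return to busy-waiting.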
Reported-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 93a711d9dcf5..f533169e5d8b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -83,10 +83,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) if (on) { I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - POSTING_READ(lvds_reg); - - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | - POWER_TARGET_ON); + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); if (wait_for(I915_READ(status_reg) & PP_ON, 1000)) DRM_ERROR("timed out waiting to enable LVDS pipe"); @@ -94,11 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) } else { intel_panel_set_backlight(dev, 0); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & - ~POWER_TARGET_ON); - if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for LVDS pipe to turn off"); - + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); POSTING_READ(lvds_reg); } From c9f9ccc150e119bab6a1003e7762b024623011d8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 13:07:25 +0100 Subject: [PATCH 136/476] drm/i915/lvds: Remove busy wait for powering up the panel. We just assume that it will happen in a timely manner. A variant of this patch was first written and tested by Arjan van de Ven. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f533169e5d8b..9089604011f9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -84,17 +84,13 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) if (on) { I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); - if (wait_for(I915_READ(status_reg) & PP_ON, 1000)) - DRM_ERROR("timed out waiting to enable LVDS pipe"); - intel_panel_set_backlight(dev, dev_priv->backlight_level); } else { intel_panel_set_backlight(dev, 0); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); - POSTING_READ(lvds_reg); } + POSTING_READ(lvds_reg); } static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) From ec5da01e23eec303dd313aa62b8ed4712c488437 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 13:34:08 +0100 Subject: [PATCH 137/476] drm/i915: Use msleep instead of mdelay during wait_vblank_off Avoid a potentially long busy-wait if we are not in the process of atomically switching to the kdb console. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 13 +++++++------ drivers/gpu/drm/i915/intel_drv.h | 7 +++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1e88ebbc1a1e..594f8f2410ab 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1034,16 +1034,17 @@ void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) struct drm_i915_private *dev_priv = dev->dev_private; int pipedsl_reg = (pipe == 0 ?
PIPEADSL : PIPEBDSL); unsigned long timeout = jiffies + msecs_to_jiffies(100); - u32 last_line; + u32 last_line, line; /* Wait for the display line to settle */ + line = I915_READ(pipedsl_reg) & DSL_LINEMASK; do { - last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; - mdelay(5); - } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && - time_after(timeout, jiffies)); + last_line = line; + MSLEEP(5); + line = I915_READ(pipedsl_reg) & DSL_LINEMASK; + } while (line != last_line && time_after(timeout, jiffies)); - if (time_after(jiffies, timeout)) + if (line != last_line) DRM_DEBUG_KMS("vblank wait timed out\n"); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6c6b897539f8..e5f2a61af9f6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -49,6 +49,13 @@ #define wait_for(COND, MS) _wait_for(COND, MS, 1) #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) +#define MSLEEP(x) do { \ + if (in_dbg_master()) \ + mdelay(x); \ + else \ + msleep(x); \ +} while(0) + #define KHz(x) (1000*x) #define MHz(x) KHz(1000*x) From b5c616a75428d85f92407e4509553f937b720630 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Sep 2010 19:06:13 +0100 Subject: [PATCH 138/476] drm/i915/sdvo: Poll command status 5 times without delay on read The documentation says that an SDVO command takes a maximum of 15us to be processed by the device, and that it is sufficient to read the status byte 3 times (whilst the command is still in the PENDING state) for the driver to be confident that sufficient time has elapsed. We err on the safe side and try 5 times before giving up. The only question that remains: was the old behaviour derived by experiments with real hardware? A look into the murky history of UMS, implies that the behaviour was accidental and the current retry mechanism was solely designed to catch the status byte indicating PENDING with no reference to hardware behaviour. (commit ac9181c014638dbeb334b40b4029d0ccb2b7a0fc in xf86-video-intel) Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 73 ++++++++++++++++--------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 96952d20cd21..a812d65fa31e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -462,54 +462,55 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; -static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, - void *response, int response_len, - u8 status) +static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, + void *response, int response_len) { + u8 retry = 5; + u8 status; int i; + /* + * The documentation states that all commands will be + * processed within 15µs, and that we need only poll + * the status byte a maximum of 3 times in order for the + * command to be complete. + * + * Check 5 times in case the hardware failed to read the docs. + */ + do { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + return false; + } while (status == SDVO_CMD_STATUS_PENDING && --retry); + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); - for (i = 0; i < response_len; i++) - DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); - for (; i < 8; i++) - DRM_LOG_KMS(" "); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) DRM_LOG_KMS("(%s)", cmd_status_names[status]); else DRM_LOG_KMS("(??? 
%d)", status); - DRM_LOG_KMS("\n"); -} -static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, - void *response, int response_len) -{ - int i; - u8 status; - u8 retry = 50; + if (status != SDVO_CMD_STATUS_SUCCESS) + goto log_fail; - while (retry--) { - /* Read the command response */ - for (i = 0; i < response_len; i++) { - if (!intel_sdvo_read_byte(intel_sdvo, - SDVO_I2C_RETURN_0 + i, - &((u8 *)response)[i])) - return false; - } - - /* read the return status */ - if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, - &status)) - return false; - - intel_sdvo_debug_response(intel_sdvo, response, response_len, - status); - if (status != SDVO_CMD_STATUS_PENDING) - break; - - mdelay(50); + /* Read the command response */ + for (i = 0; i < response_len; i++) { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_RETURN_0 + i, + &((u8 *)response)[i])) + goto log_fail; + DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); } - return status == SDVO_CMD_STATUS_SUCCESS; + for (; i < 8; i++) + DRM_LOG_KMS(" "); + DRM_LOG_KMS("\n"); + + return true; + +log_fail: + DRM_LOG_KMS("\n"); + return false; } static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) From 6edc3242e35f03990e362e7c115e722717f0f7a7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 17:16:17 +0100 Subject: [PATCH 139/476] drm/i915/bios: Prevent NULL dereference after allocation failure Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_bios.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8d7deca69830..8986a4b898db 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -169,6 +169,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, ((unsigned char *)entry + dvo_timing_offset); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); + if (!panel_fixed_mode) + return; fill_detail_timing_data(panel_fixed_mode, dvo_timing); From e9e331a8abeece1565d383510ed985945132ffe3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 01:16:10 +0100 Subject: [PATCH 140/476] drm/i915/lvds: Ensure panel is unlocked for Ironlake or the panel fitter Commit 77d07fd9d73ef28689737c0952dbd5d6a5017743 introduced a regression: by not waiting for the panel to be turned off, we left the panel and PLL registers locked across the modeset, and thus the panel remained blank. As pointed out by Daniel Vetter, when testing LVDS it helps to open the laptop and look at the actual panel you are purporting to test. A second issue with the patch was that in order to modify the panel fitter before gen5, the pipe and the panel must first be completely powered down. So we wait.
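To make the ordering concrete, here is a minimal sketch of the pre-Ironlake sequence; it reuses the registers and helpers that appear in the diff below and is illustrative rather than a literal hunk:

	/* Power the panel down and wait for it before touching the fitter. */
	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & ~POWER_TARGET_ON);
	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
		DRM_ERROR("timed out waiting for panel to power off\n");
	I915_WRITE(PFIT_CONTROL, 0);	/* only now is the panel fitter safe to reprogram */
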
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 31 -------- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_lvds.c | 103 +++++++++++++++++++-------- drivers/gpu/drm/i915/intel_overlay.c | 33 ++++++++- 4 files changed, 105 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 594f8f2410ab..0004534e7c7d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2636,33 +2636,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev) return 133000; } -/** - * Return the pipe currently connected to the panel fitter, - * or -1 if the panel fitter is not present or not in use - */ -int intel_panel_fitter_pipe (struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pfit_control; - - /* i830 doesn't have a panel fitter */ - if (IS_I830(dev)) - return -1; - - pfit_control = I915_READ(PFIT_CONTROL); - - /* See if the panel fitter is in use */ - if ((pfit_control & PFIT_ENABLE) == 0) - return -1; - - /* 965 can place panel fitter on either pipe */ - if (IS_I965G(dev)) - return (pfit_control >> 29) & 0x3; - - /* older chips can only use pipe 1 */ - return 1; -} - struct fdi_m_n { u32 tu; u32 gmch_m; @@ -3921,10 +3894,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, pipeconf |= PIPECONF_ENABLE; dpll |= DPLL_VCO_ENABLE; - /* Disable the panel fitter if it was on our pipe */ - if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) - I915_WRITE(PFIT_CONTROL, 0); - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e5f2a61af9f6..7e2646f1fec9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -239,7 +239,6 @@ extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); -extern int intel_panel_fitter_pipe (struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 9089604011f9..bfc1bb443b05 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -46,6 +46,7 @@ struct intel_lvds { int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; + bool pfit_dirty; }; static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) @@ -53,31 +54,20 @@ static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) return container_of(encoder, struct intel_lvds, base.base); } -static void intel_lvds_lock_panel(struct drm_device *dev, bool lock) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (lock) - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); - else - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); -} - /** * Sets the power state for the panel. 
*/ -static void intel_lvds_set_power(struct drm_device *dev, bool on) +static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) { + struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, status_reg, lvds_reg; + u32 ctl_reg, lvds_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; - status_reg = PCH_PP_STATUS; lvds_reg = PCH_LVDS; } else { ctl_reg = PP_CONTROL; - status_reg = PP_STATUS; lvds_reg = LVDS; } @@ -86,8 +76,18 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); intel_panel_set_backlight(dev, dev_priv->backlight_level); } else { + dev_priv->backlight_level = intel_panel_get_backlight(dev); + intel_panel_set_backlight(dev, 0); I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + + if (intel_lvds->pfit_control) { + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + I915_WRITE(PFIT_CONTROL, 0); + intel_lvds->pfit_control = 0; + } + I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); } POSTING_READ(lvds_reg); @@ -95,12 +95,12 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->dev; + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) - intel_lvds_set_power(dev, true); + intel_lvds_set_power(intel_lvds, true); else - intel_lvds_set_power(dev, false); + intel_lvds_set_power(intel_lvds, false); /* XXX: We never power down the LVDS pairs. */ } @@ -331,8 +331,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } out: - intel_lvds->pfit_control = pfit_control; - intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + if (pfit_control != intel_lvds->pfit_control || + pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { + intel_lvds->pfit_control = pfit_control; + intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + intel_lvds->pfit_dirty = true; + } dev_priv->lvds_border_bits = border; /* @@ -352,24 +356,56 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) dev_priv->backlight_level = intel_panel_get_backlight(dev); - if (intel_lvds->pfit_control == I915_READ(PFIT_CONTROL)) - intel_lvds_lock_panel(dev, false); - else - intel_lvds_set_power(dev, false); + /* We try to do the minimum that is necessary in order to unlock + * the registers for mode setting. + * + * On Ironlake, this is quite simple as we just set the unlock key + * and ignore all subtleties. (This may cause some issues...) + * + * Prior to Ironlake, we must disable the pipe if we want to adjust + * the panel fitter. However at all other times we can just reset + * the registers regardless. 
+ */ + + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else if (intel_lvds->pfit_dirty) { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) & ~POWER_TARGET_ON); + I915_WRITE(LVDS, I915_READ(LVDS) & ~LVDS_PORT_EN); + } else { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); + } } -static void intel_lvds_commit( struct drm_encoder *encoder) +static void intel_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - if ((I915_READ(PP_CONTROL) & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) - intel_lvds_lock_panel(dev, true); - else - intel_lvds_set_power(dev, true); + /* Undo any unlocking done in prepare to prevent accidental + * adjustment of the registers. + */ + if (HAS_PCH_SPLIT(dev)) { + u32 val = I915_READ(PCH_PP_CONTROL); + if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) + I915_WRITE(PCH_PP_CONTROL, val & 0x3); + } else { + u32 val = I915_READ(PP_CONTROL); + if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) + I915_WRITE(PP_CONTROL, val & 0x3); + } + + /* Always do a full power on as we do not know what state + * we were left in. + */ + intel_lvds_set_power(intel_lvds, true); } static void intel_lvds_mode_set(struct drm_encoder *encoder, @@ -389,13 +425,20 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, if (HAS_PCH_SPLIT(dev)) return; + if (!intel_lvds->pfit_dirty) + return; + /* * Enable automatic panel scaling so that non-native modes fill the * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. 
*/ + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); + intel_lvds->pfit_dirty = false; } /** @@ -824,6 +867,10 @@ void intel_lvds_init(struct drm_device *dev) return; } + if (!HAS_PCH_SPLIT(dev)) { + intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); + } + intel_encoder = &intel_lvds->base; encoder = &intel_encoder->base; connector = &intel_connector->base; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 9dcddfc8394c..3dff16118ee5 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1050,6 +1050,33 @@ static int check_overlay_src(struct drm_device *dev, return 0; } +/** + * Return the pipe currently connected to the panel fitter, + * or -1 if the panel fitter is not present or not in use + */ +static int intel_panel_fitter_pipe(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pfit_control; + + /* i830 doesn't have a panel fitter */ + if (IS_I830(dev)) + return -1; + + pfit_control = I915_READ(PFIT_CONTROL); + + /* See if the panel fitter is in use */ + if ((pfit_control & PFIT_ENABLE) == 0) + return -1; + + /* 965 can place panel fitter on either pipe */ + if (IS_I965G(dev)) + return (pfit_control >> 29) & 0x3; + + /* older chips can only use pipe 1 */ + return 1; +} + int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1124,9 +1151,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, overlay->crtc = crtc; crtc->overlay = overlay; - if (intel_panel_fitter_pipe(dev) == crtc->pipe - /* and line to wide, i.e. one-line-mode */ - && mode->hdisplay > 1024) { + /* line too wide, i.e. one-line-mode */ + if (mode->hdisplay > 1024 && + intel_panel_fitter_pipe(dev) == crtc->pipe) { overlay->pfit_active = 1; update_pfit_vscale_ratio(overlay); } else From 8aadf70bd72c8f15994e68503af8f6722cd5c813 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 16:33:47 +0100 Subject: [PATCH 141/476] drm/i915/lvds: Remove incorrect mode locking One doesn't need to hold the mode lock in order to duplicate a mode. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bfc1bb443b05..a05ca3286782 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -732,7 +732,6 @@ static void intel_find_lvds_downclock(struct drm_device *dev, panel_fixed_mode = dev_priv->panel_fixed_mode; temp_downclock = panel_fixed_mode->clock; - mutex_lock(&dev->mode_config.mutex); list_for_each_entry(scan, &connector->probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel @@ -758,7 +757,6 @@ static void intel_find_lvds_downclock(struct drm_device *dev, } } } - mutex_unlock(&dev->mode_config.mutex); if (temp_downclock < panel_fixed_mode->clock && i915_lvds_downclock) { /* We found the downclock for LVDS. 
*/ @@ -929,23 +927,18 @@ void intel_lvds_init(struct drm_device *dev) dev_priv->lvds_edid_good = false; list_for_each_entry(scan, &connector->probed_modes, head) { - mutex_lock(&dev->mode_config.mutex); if (scan->type & DRM_MODE_TYPE_PREFERRED) { dev_priv->panel_fixed_mode = drm_mode_duplicate(dev, scan); - mutex_unlock(&dev->mode_config.mutex); intel_find_lvds_downclock(dev, connector); goto out; } - mutex_unlock(&dev->mode_config.mutex); } /* Failed to get EDID, what about VBT? */ if (dev_priv->lfp_lvds_vbt_mode) { - mutex_lock(&dev->mode_config.mutex); dev_priv->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - mutex_unlock(&dev->mode_config.mutex); if (dev_priv->panel_fixed_mode) { dev_priv->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; From 788319d48dc2b61db732b19bb9598c062c75ec37 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Sep 2010 17:34:41 +0100 Subject: [PATCH 142/476] drm/i915/lvds: Move private data to the connector from the device. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/intel_lvds.c | 155 +++++++++++++++--------------- 2 files changed, 78 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3efb30b2270..232555e33929 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -634,8 +634,6 @@ typedef struct drm_i915_private { /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; - /* indicate whether the LVDS EDID is OK */ - bool lvds_edid_good; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; struct work_struct idle_work; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a05ca3286782..b56b59236e31 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -43,17 +43,28 @@ /* Private structure for the integrated LVDS support */ struct intel_lvds { struct intel_encoder base; + + bool edid_good; + int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; bool pfit_dirty; + + struct drm_display_mode *fixed_mode; }; -static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) +static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) { return container_of(encoder, struct intel_lvds, base.base); } +static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_lvds, base); +} + /** * Sets the power state for the panel. 
*/ @@ -95,7 +106,7 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) { - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) intel_lvds_set_power(intel_lvds, true); @@ -108,16 +119,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); + struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; - if (fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; - } + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; return MODE_OK; } @@ -185,7 +193,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); struct drm_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; @@ -203,9 +211,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } } - /* If we don't have a panel mode, there is nothing we can do */ - if (dev_priv->panel_fixed_mode == NULL) - return true; /* * We have timings from the BIOS for the panel, put them in @@ -213,7 +218,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); + intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); if (HAS_PCH_SPLIT(dev)) { intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, @@ -352,7 +357,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); dev_priv->backlight_level = intel_panel_get_backlight(dev); @@ -384,7 +389,7 @@ static void intel_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); @@ -414,7 +419,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); /* * The LVDS pin pair will already have been turned on in the @@ -467,35 +472,23 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect */ static int intel_lvds_get_modes(struct drm_connector *connector) { + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *mode; - if (dev_priv->lvds_edid_good) { - struct intel_encoder *encoder = intel_attached_encoder(connector); - int ret = intel_ddc_get_modes(connector, encoder->ddc_bus); + if (intel_lvds->edid_good) { + int ret = intel_ddc_get_modes(connector, + intel_lvds->base.ddc_bus); if (ret) return ret; } - /* Didn't get an EDID, so - * Set wide sync ranges so we get all modes - * handed to valid_mode for checking - */ - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; + mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); + if (mode == 0) + return 0; - if (dev_priv->panel_fixed_mode != NULL) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); - drm_mode_probed_add(connector, mode); - - return 1; - } - - return 0; + drm_mode_probed_add(connector, mode); + return 1; } static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) @@ -584,18 +577,17 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; - if (property == dev->mode_config.scaling_mode_property && - connector->encoder) { - struct drm_crtc *crtc = connector->encoder->crtc; - struct drm_encoder *encoder = connector->encoder; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + if (property == dev->mode_config.scaling_mode_property) { + struct drm_crtc *crtc = intel_lvds->base.base.crtc; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); - return 0; + return -EINVAL; } + if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; @@ -723,15 +715,14 @@ 
static const struct dmi_system_id intel_no_lvds[] = { * Find the reduced downclock for LVDS in EDID. */ static void intel_find_lvds_downclock(struct drm_device *dev, - struct drm_connector *connector) + struct drm_display_mode *fixed_mode, + struct drm_connector *connector) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *scan, *panel_fixed_mode; + struct drm_display_mode *scan; int temp_downclock; - panel_fixed_mode = dev_priv->panel_fixed_mode; - temp_downclock = panel_fixed_mode->clock; - + temp_downclock = fixed_mode->clock; list_for_each_entry(scan, &connector->probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel @@ -740,14 +731,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, * case we can set the different FPx0/1 to dynamically select * between low and high frequency. */ - if (scan->hdisplay == panel_fixed_mode->hdisplay && - scan->hsync_start == panel_fixed_mode->hsync_start && - scan->hsync_end == panel_fixed_mode->hsync_end && - scan->htotal == panel_fixed_mode->htotal && - scan->vdisplay == panel_fixed_mode->vdisplay && - scan->vsync_start == panel_fixed_mode->vsync_start && - scan->vsync_end == panel_fixed_mode->vsync_end && - scan->vtotal == panel_fixed_mode->vtotal) { + if (scan->hdisplay == fixed_mode->hdisplay && + scan->hsync_start == fixed_mode->hsync_start && + scan->hsync_end == fixed_mode->hsync_end && + scan->htotal == fixed_mode->htotal && + scan->vdisplay == fixed_mode->vdisplay && + scan->vsync_start == fixed_mode->vsync_start && + scan->vsync_end == fixed_mode->vsync_end && + scan->vtotal == fixed_mode->vtotal) { if (scan->clock < temp_downclock) { /* * The downclock is already found. But we @@ -757,16 +748,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, } } } - if (temp_downclock < panel_fixed_mode->clock && - i915_lvds_downclock) { + if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { /* We found the downclock for LVDS. */ dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = temp_downclock; DRM_DEBUG_KMS("LVDS downclock is found in EDID. " - "Normal clock %dKhz, downclock %dKhz\n", - panel_fixed_mode->clock, temp_downclock); + "Normal clock %dKhz, downclock %dKhz\n", + fixed_mode->clock, temp_downclock); } - return; } /* @@ -921,26 +910,38 @@ void intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - dev_priv->lvds_edid_good = true; - + intel_lvds->edid_good = true; if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) - dev_priv->lvds_edid_good = false; + intel_lvds->edid_good = false; + + if (!intel_lvds->edid_good) { + /* Didn't get an EDID, so + * Set wide sync ranges so we get all modes + * handed to valid_mode for checking + */ + connector->display_info.min_vfreq = 0; + connector->display_info.max_vfreq = 200; + connector->display_info.min_hfreq = 0; + connector->display_info.max_hfreq = 200; + } list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { - dev_priv->panel_fixed_mode = + intel_lvds->fixed_mode = drm_mode_duplicate(dev, scan); - intel_find_lvds_downclock(dev, connector); + intel_find_lvds_downclock(dev, + intel_lvds->fixed_mode, + connector); goto out; } } /* Failed to get EDID, what about VBT? 
*/ if (dev_priv->lfp_lvds_vbt_mode) { - dev_priv->panel_fixed_mode = + intel_lvds->fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (dev_priv->panel_fixed_mode) { - dev_priv->panel_fixed_mode->type |= + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } @@ -961,16 +962,16 @@ void intel_lvds_init(struct drm_device *dev) crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { - dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); - if (dev_priv->panel_fixed_mode) { - dev_priv->panel_fixed_mode->type |= + intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } } /* If we still don't have a mode after all that, give up. */ - if (!dev_priv->panel_fixed_mode) + if (!intel_lvds->fixed_mode) goto failed; out: From 6b383a7f6378f193c30200435d8170f95916b5f0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 13:54:26 +0100 Subject: [PATCH 143/476] drm/i915: Share crtc setup and teardown between dpms and disable/enable This closes a couple of corner cases where we introduced and forgot about a couple of routines that need to be called when disabling the crtc and then re-enabling it. The code needs to be moved again so that the common bits are shared across generations. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 125 ++++++++++----------------- drivers/gpu/drm/i915/intel_drv.h | 2 +- 2 files changed, 46 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0004534e7c7d..1e2a17d66ebb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -44,7 +44,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc); -static void intel_crtc_update_cursor(struct drm_crtc *crtc); +static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); typedef struct { /* given values */ @@ -1927,6 +1927,26 @@ static void intel_flush_display_plane(struct drm_device *dev, I915_WRITE(reg, I915_READ(reg)); } +/* + * When we disable a pipe, we need to clear any pending scanline wait events + * to avoid hanging the ring, which we assume we are waiting on. 
+ */ +static void intel_clear_scanline_wait(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + if (IS_GEN2(dev)) + /* Can't break the hang on i8xx */ + return; + + tmp = I915_READ(PRB0_CTL); + if (tmp & RING_WAIT) { + I915_WRITE(PRB0_CTL, tmp); + POSTING_READ(PRB0_CTL); + } +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -1936,6 +1956,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + intel_update_watermarks(dev); + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); if ((temp & LVDS_PORT_EN) == 0) @@ -2082,6 +2104,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); intel_update_fbc(dev); + intel_crtc_update_cursor(crtc, true); } static void ironlake_crtc_disable(struct drm_crtc *crtc) @@ -2094,6 +2117,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) u32 reg, temp; drm_vblank_off(dev, pipe); + intel_crtc_update_cursor(crtc, false); /* Disable display plane */ reg = DSPCNTR(plane); @@ -2220,6 +2244,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) /* Wait for the clocks to turn off. */ POSTING_READ(reg); udelay(100); + + intel_update_watermarks(dev); + intel_update_fbc(dev); + intel_clear_scanline_wait(dev); } static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2270,6 +2298,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + intel_update_watermarks(dev); + /* Enable the DPLL */ reg = DPLL(pipe); temp = I915_READ(reg); @@ -2312,6 +2342,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); + intel_crtc_update_cursor(crtc, true); } static void i9xx_crtc_disable(struct drm_crtc *crtc) @@ -2325,6 +2356,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, false); + intel_crtc_update_cursor(crtc, false); drm_vblank_off(dev, pipe); if (dev_priv->cfb_plane == plane && @@ -2346,7 +2378,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* Don't disable pipe A or pipe A PLLs if needed */ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - return; + goto done; /* Next, disable display pipes */ reg = PIPECONF(pipe); @@ -2368,6 +2400,11 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); } + +done: + intel_update_fbc(dev); + intel_update_watermarks(dev); + intel_clear_scanline_wait(dev); } static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2387,26 +2424,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) } } -/* - * When we disable a pipe, we need to clear any pending scanline wait events - * to avoid hanging the ring, which we assume we are waiting on. - */ -static void intel_clear_scanline_wait(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp; - - if (IS_GEN2(dev)) - /* Can't break the hang on i8xx */ - return; - - tmp = I915_READ(PRB0_CTL); - if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); - } -} - /** * Sets the power management mode of the pipe and plane. 
*/ @@ -2423,34 +2440,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) return; intel_crtc->dpms_mode = mode; - intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; - - /* When switching on the display, ensure that SR is disabled - * with multiple pipes prior to enabling to new pipe. - * - * When switching off the display, make sure the cursor is - * properly hidden and there are no pending waits prior to - * disabling the pipe. - */ - if (mode == DRM_MODE_DPMS_ON) - intel_update_watermarks(dev); - else - intel_crtc_update_cursor(crtc); dev_priv->display.dpms(crtc, mode); - if (mode == DRM_MODE_DPMS_ON) { - intel_crtc_update_cursor(crtc); - } else { - /* XXX Note that this is not a complete solution, but a hack - * to avoid the most frequently hit hang. - */ - intel_clear_scanline_wait(dev); - - intel_update_watermarks(dev); - } - intel_update_fbc(dev); - if (!dev->primary->master) return; @@ -2485,50 +2477,22 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) */ static void i9xx_crtc_prepare(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_crtc->cursor_on = false; - intel_crtc_update_cursor(crtc); - i9xx_crtc_disable(crtc); - intel_clear_scanline_wait(dev); } static void i9xx_crtc_commit(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_update_watermarks(dev); i9xx_crtc_enable(crtc); - - intel_crtc->cursor_on = true; - intel_crtc_update_cursor(crtc); } static void ironlake_crtc_prepare(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_crtc->cursor_on = false; - intel_crtc_update_cursor(crtc); - ironlake_crtc_disable(crtc); - intel_clear_scanline_wait(dev); } static void ironlake_crtc_commit(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_update_watermarks(dev); ironlake_crtc_enable(crtc); - - intel_crtc->cursor_on = true; - intel_crtc_update_cursor(crtc); } void intel_encoder_prepare (struct drm_encoder *encoder) @@ -3615,7 +3579,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, @@ -4225,7 +4189,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... 
*/ -static void intel_crtc_update_cursor(struct drm_crtc *crtc) +static void intel_crtc_update_cursor(struct drm_crtc *crtc, + bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4238,7 +4203,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) pos = 0; - if (intel_crtc->cursor_on && crtc->fb) { + if (on && crtc->enabled && crtc->fb) { base = intel_crtc->cursor_addr; if (x > (int) crtc->fb->width) base = 0; @@ -4370,7 +4335,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); return 0; fail_unpin: @@ -4389,7 +4354,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) intel_crtc->cursor_x = x; intel_crtc->cursor_y = y; - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); return 0; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7e2646f1fec9..c0891b1ec7b5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -180,7 +180,7 @@ struct intel_crtc { uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; - bool cursor_visible, cursor_on; + bool cursor_visible; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) From f7abfe8b281991c66406c42c1a6c6c9ee0daa0ff Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 14:19:16 +0100 Subject: [PATCH 144/476] drm/i915: Fix an overlay regression from 7e7d76c When separating out the prepare/commit into its own separate functions we overlooked that the intel_crtc->dpms_mode was being used elsewhere to check on the actual status of the pipe. Track that bit of logic separately from the actual dpms mode, so there is no confusion should we be able to handle multiple dpms modes, nor any semantic conflict between prepare/commit and dpms. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_overlay.c | 8 +++----- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1e2a17d66ebb..a54b701f867c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1956,6 +1956,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + if (intel_crtc->active) + return; + + intel_crtc->active = true; intel_update_watermarks(dev); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -2116,6 +2120,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + if (!intel_crtc->active) + return; + drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); @@ -2245,6 +2252,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) POSTING_READ(reg); udelay(100); + intel_crtc->active = false; intel_update_watermarks(dev); intel_update_fbc(dev); intel_clear_scanline_wait(dev); @@ -2298,6 +2306,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + if (intel_crtc->active) + return; + + intel_crtc->active = true; intel_update_watermarks(dev); /* Enable the DPLL */ @@ -2354,6 +2366,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) int plane = intel_crtc->plane; u32 reg, temp; + if (!intel_crtc->active) + return; + /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); @@ -2402,6 +2417,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) } done: + intel_crtc->active = false; intel_update_fbc(dev); intel_update_watermarks(dev); intel_clear_scanline_wait(dev); @@ -3463,7 +3479,7 @@ static void intel_update_watermarks(struct drm_device *dev) /* Get the clock config from both planes */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { + if (intel_crtc->active) { enabled++; if (intel_crtc->plane == 0) { DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c0891b1ec7b5..5171b0523178 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -169,6 +169,7 @@ struct intel_crtc { enum plane plane; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; + bool active; /* is the crtc on? independent of the dpms mode */ bool busy; /* is scanout buffer being updated frequently? 
*/ struct timer_list idle_timer; bool lowfreq_avail; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3dff16118ee5..c4699c916698 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -875,15 +875,13 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, struct intel_crtc *crtc) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; - u32 pipeconf; - if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) + if (!crtc->active) return -EINVAL; - pipeconf = I915_READ(PIPECONF(crtc->pipe)); - /* can't use the overlay with double wide pipe */ - if (!IS_I965G(overlay->dev) && pipeconf & PIPECONF_DOUBLE_WIDE) + if (!IS_I965G(overlay->dev) && + (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) return -EINVAL; return 0; From e65d9305f528d4f354378690135ee8d1c60ec0a5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 16:58:39 +0100 Subject: [PATCH 145/476] drm/i915: Initialize intel_crtc->active Fix a regression in the previous regression fix... In order to turn off the pipes entirely upon the first modeset, we pretend that BIOS (or earlier module incarnation) left them active. The first task performed by setup_initial_configuration() is to disable all pipes and so to avoid skipping that step and so to ensure a known configuration we need to mark all the crtcs as active. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a54b701f867c..e4fd7a3a9ee6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5201,6 +5201,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = -1; + intel_crtc->active = true; /* force the pipe off on setup_init_config */ if (HAS_PCH_SPLIT(dev)) { intel_helper_funcs.prepare = ironlake_crtc_prepare; From e2e767abd85806d05a5266b3b112baaf80ee3382 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 16:53:12 +0100 Subject: [PATCH 146/476] drm/i915: Remove redundant initialisation of crtc->pipe Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e4fd7a3a9ee6..16ae34559caa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5178,8 +5178,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); - intel_crtc->pipe = pipe; - intel_crtc->plane = pipe; for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; intel_crtc->lut_g[i] = i; @@ -5189,9 +5187,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* Swap pipes & planes for FBC on pre-965 */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; - if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { + if (IS_MOBILE(dev) && IS_GEN3(dev)) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); - intel_crtc->plane = ((pipe == 0) ? 
1 : 0); + intel_crtc->plane = !pipe; } BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || From b3b079dbef06c7f775178d561a4c8e47b7447139 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 23:44:34 +0100 Subject: [PATCH 147/476] drm/i915: Reduce hangcheck frequency By reducing the hangcheck frequency we check less often, conserving resources, and still detect a lock up quickly. On a fast machine with a slow GPU (like a Core2 paired with a 945G) it is easy for the hangcheck to misfire as we check too fast. Also, once hung, if we fail to completely reset the chip, we have a nasty habit of proclaiming a hang many times a second and generating a strobe-like display. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 6 ++++-- drivers/gpu/drm/i915/i915_irq.c | 9 ++++++--- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 232555e33929..70cbe3cee1ab 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -299,7 +299,7 @@ typedef struct drm_i915_private { int num_pipe; /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ +#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e0b7ddc917c2..9391765af70d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1639,9 +1639,11 @@ i915_add_request(struct drm_device *dev, } if (!dev_priv->mm.suspended) { - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); if (was_empty) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, + &dev_priv->mm.retire_work, HZ); } return seqno; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index bc8438d6d843..e64b8eaa0b9d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -328,7 +328,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) trace_i915_gem_request_complete(dev, seqno); DRM_WAKEUP(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (gt_iir & GT_BSD_USER_INTERRUPT) DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); @@ -1018,7 +1019,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) trace_i915_gem_request_complete(dev, seqno); DRM_WAKEUP(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) @@ -1394,7 +1396,8 @@ void i915_hangcheck_elapsed(unsigned long data) out: /* Reset timer case chip hangs without another request being added */ - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } /* drm_dma.h hooks From 7213342db58adb7b8e399a00fc423951d7f75369 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Sep 2010 
23:56:38 +0100 Subject: [PATCH 148/476] drm/i915: Consolidate flushing the display plane Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 42 +++++++--------------------- drivers/gpu/drm/i915/intel_display.c | 16 ++++------- drivers/gpu/drm/i915/intel_fb.c | 8 +----- 4 files changed, 17 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 70cbe3cee1ab..24b7796c33af 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1024,7 +1024,6 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); void i915_gem_object_put_pages(struct drm_gem_object *obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); -int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9391765af70d..328f8c9ee966 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2645,26 +2645,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) old_write_domain); } -int -i915_gem_object_flush_write_domain(struct drm_gem_object *obj) -{ - int ret = 0; - - switch (obj->write_domain) { - case I915_GEM_DOMAIN_GTT: - i915_gem_object_flush_gtt_write_domain(obj); - break; - case I915_GEM_DOMAIN_CPU: - i915_gem_object_flush_cpu_write_domain(obj); - break; - default: - ret = i915_gem_object_flush_gpu_write_domain(obj, true); - break; - } - - return ret; -} - /** * Moves a single object to the GTT read, and possibly write domain. * @@ -2686,21 +2666,16 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (ret != 0) return ret; - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + i915_gem_object_flush_cpu_write_domain(obj); - /* If we're writing through the GTT domain, then CPU and GPU caches - * will need to be invalidated at next use. - */ if (write) { ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; - - obj->read_domains &= I915_GEM_DOMAIN_GTT; } - i915_gem_object_flush_cpu_write_domain(obj); + old_write_domain = obj->write_domain; + old_read_domains = obj->read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. @@ -2708,6 +2683,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->read_domains |= I915_GEM_DOMAIN_GTT; if (write) { + obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = I915_GEM_DOMAIN_GTT; obj_priv->dirty = 1; } @@ -2773,6 +2749,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) */ i915_gem_object_set_to_full_cpu_read_domain(obj); + if (write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + } + old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; @@ -2792,10 +2774,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * need to be invalidated at next use. 
*/ if (write) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - obj->read_domains &= I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 16ae34559caa..810ed2dca4c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1448,6 +1448,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) if (ret != 0) return ret; + ret = i915_gem_object_set_to_display_plane(obj); + if (ret != 0) { + i915_gem_object_unpin(obj); + return ret; + } + /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. For simplicity, we always install @@ -1589,13 +1595,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - ret = i915_gem_object_set_to_display_plane(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); if (ret) { i915_gem_object_unpin(obj); @@ -5043,9 +5042,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, drm_gem_object_reference(obj); crtc->fb = fb; - ret = i915_gem_object_flush_write_domain(obj); - if (ret) - goto cleanup_objs; ret = drm_vblank_get(dev, intel_crtc->pipe); if (ret) diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 0ee4a8c16608..e2d13e394a0d 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -93,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev, mutex_lock(&dev->struct_mutex); + /* Flush everything out, we'll be doing GTT only from now on */ ret = intel_pin_and_fence_fb_obj(dev, fbo); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; } - /* Flush everything out, we'll be doing GTT only from now on */ - ret = i915_gem_object_set_to_gtt_domain(fbo, 1); - if (ret) { - DRM_ERROR("failed to bind fb: %d.\n", ret); - goto out_unpin; - } - info = framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; From 0bc23aad3b67ca0cd7480dec0b7652d9b8686432 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 10:22:23 +0100 Subject: [PATCH 149/476] drm/i915: Fix regression in ba3d8d749b I pulled the wrong version of the patch from Daniel Vetter which was missing the read barriers -- and the one that was causing all the trouble was from i915_gem_object_put_fence_reg(), leading to GPU hangs on gen3. 
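Roughly, the corrected teardown in i915_gem_object_put_fence_reg() becomes (condensed from the hunk below):

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret)
		return ret;
	ret = i915_gem_object_wait_rendering(obj);	/* added: wait for the GPU to finish with the object */
	if (ret)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);
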
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 328f8c9ee966..4a0d85c78d47 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2430,12 +2430,16 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) int ret; ret = i915_gem_object_flush_gpu_write_domain(obj, false); - if (ret != 0) + if (ret) + return ret; + + ret = i915_gem_object_wait_rendering(obj); + if (ret) return ret; } i915_gem_object_flush_gtt_write_domain(obj); - i915_gem_clear_fence_reg (obj); + i915_gem_clear_fence_reg(obj); return 0; } From 9e76e7b8bd716413cfd722a807aa22723f3a895f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 12:12:11 +0100 Subject: [PATCH 150/476] agp/intel: Use macro to set the count of the size array It's a fixed size array so let the compiler do the hard work of updating all the call sites. Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9cb7c98afb9c..dedf05dc433d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -539,8 +539,7 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } -static struct aper_size_info_fixed intel_fake_agp_sizes[] = -{ +static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { {128, 32768, 5}, /* The 64M mode still requires a 128k gatt */ {64, 16384, 5}, @@ -833,16 +832,17 @@ static int intel_gtt_init(void) static int intel_fake_agp_fetch_size(void) { + int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); unsigned int aper_size; int i; - int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) / MB(1); for (i = 0; i < num_sizes; i++) { if (aper_size == intel_fake_agp_sizes[i].size) { - agp_bridge->current_size = intel_fake_agp_sizes + i; + agp_bridge->current_size = + (void *) (intel_fake_agp_sizes + i); return aper_size; } } @@ -1363,9 +1363,9 @@ static const struct agp_bridge_driver intel_810_driver = { static const struct agp_bridge_driver intel_830_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, .configure = intel_i830_configure, .fetch_size = intel_fake_agp_fetch_size, @@ -1390,9 +1390,9 @@ static const struct agp_bridge_driver intel_830_driver = { static const struct agp_bridge_driver intel_915_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, @@ -1423,9 +1423,9 @@ static const struct agp_bridge_driver intel_915_driver = { static const struct agp_bridge_driver intel_i965_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = 
true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, @@ -1456,9 +1456,9 @@ static const struct agp_bridge_driver intel_i965_driver = { static const struct agp_bridge_driver intel_gen6_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, @@ -1489,9 +1489,9 @@ static const struct agp_bridge_driver intel_gen6_driver = { static const struct agp_bridge_driver intel_g33_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_fake_agp_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, .configure = intel_i9xx_configure, .fetch_size = intel_fake_agp_fetch_size, From 48b956c5a89c7b100ef3b818b6ccf759ab695383 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 12:50:34 +0100 Subject: [PATCH 151/476] drm/i915: Push pipelining of display plane flushes to the caller This ensures that we do wait upon the flushes to complete if necessary and avoid the visual tears, whilst enabling pipelined page-flips. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 8 ++-- drivers/gpu/drm/i915/intel_display.c | 58 ++++++++++++++++------------ drivers/gpu/drm/i915/intel_drv.h | 3 +- drivers/gpu/drm/i915/intel_fb.c | 2 +- 5 files changed, 44 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 24b7796c33af..b97d62d81905 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1013,7 +1013,8 @@ void i915_gem_process_flushing_list(struct drm_device *dev, struct intel_ring_buffer *ring); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); +int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, + bool pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_gem_object *obj, int id, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4a0d85c78d47..85a3cf4ab481 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2597,6 +2597,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); + BUG_ON(obj->write_domain); trace_i915_gem_object_change_domain(obj, obj->read_domains, @@ -2704,7 +2705,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) * wait, as in modesetting process we're not supposed to be interrupted. 
*/ int -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) +i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, + bool pipelined) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; @@ -2714,8 +2716,8 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); - if (ret != 0) + ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined); + if (ret) return ret; i915_gem_object_flush_cpu_write_domain(obj); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 810ed2dca4c7..a7628fdd0c4c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1417,7 +1417,9 @@ out_disable: } int -intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) +intel_pin_and_fence_fb_obj(struct drm_device *dev, + struct drm_gem_object *obj, + bool pipelined) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; @@ -1445,14 +1447,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) } ret = i915_gem_object_pin(obj, alignment); - if (ret != 0) + if (ret) return ret; - ret = i915_gem_object_set_to_display_plane(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } + ret = i915_gem_object_set_to_display_plane(obj, pipelined); + if (ret) + goto err_unpin; /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using @@ -1462,13 +1462,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) if (obj_priv->fence_reg == I915_FENCE_REG_NONE && obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } + if (ret) + goto err_unpin; } return 0; + +err_unpin: + i915_gem_object_unpin(obj); + return ret; } /* Assume fb object is pinned & idle & fenced and just update base pointers */ @@ -1589,7 +1591,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); + ret = intel_pin_and_fence_fb_obj(dev, obj, false); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; @@ -5004,7 +5006,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_unpin_work *work; unsigned long flags, offset; int pipe = intel_crtc->pipe; - u32 pf, pipesrc; + u32 was_dirty, pf, pipesrc; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); @@ -5033,7 +5035,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); + was_dirty = obj->write_domain & I915_GEM_GPU_DOMAINS; + ret = intel_pin_and_fence_fb_obj(dev, obj, true); if (ret) goto cleanup_work; @@ -5051,17 +5054,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; - if (IS_GEN3(dev) || IS_GEN2(dev)) { - u32 flip_mask; - - if (intel_crtc->plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - + if (was_dirty || IS_GEN3(dev) || IS_GEN2(dev)) { BEGIN_LP_RING(2); - OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); - OUT_RING(0); + if (IS_GEN3(dev) || IS_GEN2(dev)) { + u32 flip_mask; + + /* Can't queue multiple flips, so wait for the previous + * one to finish 
before executing the next. + */ + + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + } else + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); ADVANCE_LP_RING(); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5171b0523178..31f072d31e37 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -281,7 +281,8 @@ extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj); + struct drm_gem_object *obj, + bool pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e2d13e394a0d..8a23bf772c95 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, fbo); + ret = intel_pin_and_fence_fb_obj(dev, fbo, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; From 2cf34d7b7ee99c27c1a6bdd2f91344cbfa5fef5c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 13:03:28 +0100 Subject: [PATCH 152/476] drm/i915: Allow get_fence_reg() to be uninterruptible As we currently may need to acquire a fence register during a modeset, we need to be able to do so in an uninterruptible manner. So expose that parameter to the callers of the fence management code. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 6 ++-- drivers/gpu/drm/i915/i915_gem.c | 43 +++++++++++++++----------- drivers/gpu/drm/i915/i915_gem_tiling.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- 4 files changed, 31 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b97d62d81905..b0692c40b0c7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -986,8 +986,10 @@ void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring); bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); -int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); -int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); +int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, + bool interruptible); +int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, + bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 85a3cf4ab481..02719df418e3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -48,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible); static int i915_gem_object_bind_to_gtt(struct 
drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); @@ -1181,7 +1182,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Need a new fence register? */ if (obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj); + ret = i915_gem_object_get_fence_reg(obj, true); if (ret) goto unlock; } @@ -1919,7 +1920,8 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. */ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj) +i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -1938,10 +1940,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) DRM_INFO("%s: object %p wait for seqno %08x\n", __func__, obj, obj_priv->last_rendering_seqno); #endif - ret = i915_wait_request(dev, - obj_priv->last_rendering_seqno, - obj_priv->ring); - if (ret != 0) + ret = i915_do_wait_request(dev, + obj_priv->last_rendering_seqno, + interruptible, + obj_priv->ring); + if (ret) return ret; } @@ -2234,7 +2237,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); } -static int i915_find_fence_reg(struct drm_device *dev) +static int i915_find_fence_reg(struct drm_device *dev, + bool interruptible) { struct drm_i915_fence_reg *reg = NULL; struct drm_i915_gem_object *obj_priv = NULL; @@ -2279,7 +2283,7 @@ static int i915_find_fence_reg(struct drm_device *dev) * private reference to obj like the other callers of put_fence_reg * (set_tiling ioctl) do. */ drm_gem_object_reference(obj); - ret = i915_gem_object_put_fence_reg(obj); + ret = i915_gem_object_put_fence_reg(obj, interruptible); drm_gem_object_unreference(obj); if (ret != 0) return ret; @@ -2301,7 +2305,8 @@ static int i915_find_fence_reg(struct drm_device *dev) * and tiling format. */ int -i915_gem_object_get_fence_reg(struct drm_gem_object *obj) +i915_gem_object_get_fence_reg(struct drm_gem_object *obj, + bool interruptible) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2336,7 +2341,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) break; } - ret = i915_find_fence_reg(dev); + ret = i915_find_fence_reg(dev, interruptible); if (ret < 0) return ret; @@ -2403,12 +2408,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) * i915_gem_object_put_fence_reg - waits on outstanding fenced access * to the buffer to finish, and then resets the fence register. * @obj: tiled object holding a fence register. + * @bool: whether the wait upon the fence is interruptible * * Zeroes out the fence register itself and clears out the associated * data structures in dev_priv and obj_priv. 
*/ int -i915_gem_object_put_fence_reg(struct drm_gem_object *obj) +i915_gem_object_put_fence_reg(struct drm_gem_object *obj, + bool interruptible) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -2429,11 +2436,11 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (!IS_I965G(dev)) { int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj, true); if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, interruptible); if (ret) return ret; } @@ -2606,7 +2613,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, if (pipelined) return 0; - return i915_gem_object_wait_rendering(obj); + return i915_gem_object_wait_rendering(obj, true); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2674,7 +2681,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) i915_gem_object_flush_cpu_write_domain(obj); if (write) { - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; } @@ -2756,7 +2763,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) i915_gem_object_set_to_full_cpu_read_domain(obj); if (write) { - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; } @@ -3125,7 +3132,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, * properly handle blits to/from tiled surfaces. */ if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj); + ret = i915_gem_object_get_fence_reg(obj, false); if (ret != 0) { i915_gem_object_unpin(obj); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 3c0859edfdf7..caef7ff2aa39 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -328,7 +328,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) ret = i915_gem_object_unbind(obj); else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - ret = i915_gem_object_put_fence_reg(obj); + ret = i915_gem_object_put_fence_reg(obj, true); else i915_gem_release_mmap(obj); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a7628fdd0c4c..11d643acf2fa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1461,7 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, */ if (obj_priv->fence_reg == I915_FENCE_REG_NONE && obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj); + ret = i915_gem_object_get_fence_reg(obj, false); if (ret) goto err_unpin; } From 890f3359f7b84d7015104360d647ccac5f515542 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 16:46:59 +0100 Subject: [PATCH 153/476] drm/i915/i2c: Track the parent encoder rather than just the dev The SDVO proxy i2c adapter wants to be able to use information stored in the encoder, so pass that through intel_i2c rather than iterate over all known encoders every time. 
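In rough outline, each bit-banged channel now carries a pointer to its owning encoder and derives the device-private data through it, instead of caching a drm_device pointer. The sketch below is condensed from the hunks that follow rather than a verbatim extract:

struct intel_i2c_chan {
	struct intel_encoder *encoder;	/* owning encoder; replaces drm_dev */
	u32 reg;			/* GPIO register for this bus */
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data algo;
};

static inline struct drm_i915_private *
get_dev_priv(struct intel_i2c_chan *chan)
{
	/* reach dev_priv through the encoder's device */
	return chan->encoder->base.dev->dev_private;
}

With the encoder recorded in the channel, the SDVO proxy transfer function can recover its intel_sdvo with container_of() on the adapter's algo data, instead of walking dev->mode_config.encoder_list on every transfer as before.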
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 9 ++--- drivers/gpu/drm/i915/intel_drv.h | 5 ++- drivers/gpu/drm/i915/intel_dvo.c | 9 +++-- drivers/gpu/drm/i915/intel_hdmi.c | 18 +++++---- drivers/gpu/drm/i915/intel_i2c.c | 26 ++++++++----- drivers/gpu/drm/i915/intel_lvds.c | 3 +- drivers/gpu/drm/i915/intel_sdvo.c | 62 +++++++++++++------------------ 7 files changed, 67 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b39183bcc9fa..0403ec9e164a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -445,19 +445,17 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { - int ret; struct intel_encoder *encoder = intel_attached_encoder(connector); struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; - + int ret; ret = intel_ddc_get_modes(connector, encoder->ddc_bus); if (ret || !IS_G4X(dev)) goto end; /* Try to probe digital port for output in DVI-I -> VGA mode. */ - ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); - + ddc_bus = intel_i2c_create(encoder, GPIOD, "CRTDDC_D"); if (!ddc_bus) { dev_printk(KERN_ERR, &connector->dev->pdev->dev, "DDC bus registration failed for CRTDDC_D.\n"); @@ -545,7 +543,8 @@ void intel_crt_init(struct drm_device *dev) if (dev_priv->crt_ddc_bus != 0) i2c_reg = dev_priv->crt_ddc_bus; } - intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + i2c_reg, "CRTDDC_A"); if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 31f072d31e37..8fe6b730c679 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -128,7 +128,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) } struct intel_i2c_chan { - struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ + struct intel_encoder *encoder; u32 reg; /* GPIO reg */ struct i2c_adapter adapter; struct i2c_algo_bit_data algo; @@ -206,7 +206,8 @@ struct intel_unpin_work { bool enable_stall_check; }; -struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, +struct i2c_adapter *intel_i2c_create(struct intel_encoder *encoder, + const u32 reg, const char *name); void intel_i2c_destroy(struct i2c_adapter *adapter); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index df42a9c9afc1..7de7d1a68c07 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -362,7 +362,8 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder = &intel_dvo->base; /* Set up the DDC bus */ - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + GPIOD, "DVODDC_D"); if (!intel_encoder->ddc_bus) goto free_intel; @@ -389,10 +390,10 @@ void intel_dvo_init(struct drm_device *dev) */ if (i2cbus != NULL) intel_i2c_destroy(i2cbus); - if (!(i2cbus = intel_i2c_create(dev, gpio, - gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { + i2cbus = intel_i2c_create(intel_encoder, gpio, + gpio == GPIOB ? 
"DVOI2C_B" : "DVOI2C_E"); + if (i2cbus == NULL) continue; - } intel_dvo->dev = *dvo; ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index bba0aba15a96..93d5b61bf5bd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -243,26 +243,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) /* Set up the DDC bus. */ if (sdvox_reg == SDVOB) { intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, - "HDMIB"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + PCH_GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, - "HDMIC"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + PCH_GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, - "HDMID"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + PCH_GPIOF, "HDMID"); dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } if (!intel_encoder->ddc_bus) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index de03989d6df3..d3d65a9cfba1 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -58,25 +58,31 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable) #define I2C_RISEFALL_TIME 20 +static inline struct drm_i915_private * +get_dev_priv(struct intel_i2c_chan *chan) +{ + return chan->encoder->base.dev->dev_private; +} + static int get_clock(void *data) { struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = get_dev_priv(chan); return (I915_READ(chan->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = get_dev_priv(chan); return (I915_READ(chan->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_i2c_chan *chan = data; - struct drm_device *dev = chan->drm_dev; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = get_dev_priv(chan); + struct drm_device *dev = dev_priv->dev; u32 reserved = 0, clock_bits; /* On most chips, these bits must be preserved in software. 
*/ @@ -96,8 +102,8 @@ static void set_clock(void *data, int state_high) static void set_data(void *data, int state_high) { struct intel_i2c_chan *chan = data; - struct drm_device *dev = chan->drm_dev; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = get_dev_priv(chan); + struct drm_device *dev = dev_priv->dev; u32 reserved = 0, data_bits; /* On most chips, these bits must be preserved in software. */ @@ -153,16 +159,18 @@ intel_i2c_reset_gmbus(struct drm_device *dev) * %GPIOH * see PRM for details on how these different busses are used. */ -struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, +struct i2c_adapter *intel_i2c_create(struct intel_encoder *encoder, + const u32 reg, const char *name) { struct intel_i2c_chan *chan; + struct drm_device *dev = encoder->base.dev; chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); if (!chan) goto out_free; - chan->drm_dev = dev; + chan->encoder = encoder; chan->reg = reg; snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); chan->adapter.owner = THIS_MODULE; @@ -178,7 +186,7 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, i2c_set_adapdata(&chan->adapter, chan); - if(i2c_bit_add_bus(&chan->adapter)) + if (i2c_bit_add_bus(&chan->adapter)) goto out_free; intel_i2c_reset_gmbus(dev); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b56b59236e31..2ff4a5cb2d56 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -899,7 +899,8 @@ void intel_lvds_init(struct drm_device *dev) */ /* Set up the DDC bus. */ - intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); + intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, + gpio, "LVDSDDC_C"); if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a812d65fa31e..0e68f9622565 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -184,7 +184,7 @@ struct intel_sdvo_connector { u32 cur_dot_crawl, max_dot_crawl; }; -static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) +static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base.base); } @@ -1051,7 +1051,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); int multiplier; /* We need to construct preferred input timings based on our @@ -1093,7 +1093,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd; @@ -1200,7 +1200,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); struct intel_crtc *intel_crtc = 
to_intel_crtc(encoder->crtc); u32 temp; @@ -1899,7 +1899,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); if (intel_sdvo->analog_ddc_bus) intel_i2c_destroy(intel_sdvo->analog_ddc_bus); @@ -1984,35 +1984,15 @@ intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) &intel_sdvo->is_hdmi, 1); } -static struct intel_sdvo * -intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan) -{ - struct drm_device *dev = chan->drm_dev; - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - if (intel_sdvo->base.ddc_bus == &chan->adapter) - return intel_sdvo; - } - - return NULL; -} - static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct intel_sdvo *intel_sdvo; - struct i2c_algo_bit_data *algo_data; const struct i2c_algorithm *algo; - algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; - intel_sdvo = - intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *) - (algo_data->data)); - if (intel_sdvo == NULL) - return -EINVAL; - + intel_sdvo = container_of(i2c_adap->algo_data, + struct intel_sdvo, + base); algo = intel_sdvo->base.i2c_bus->algo; intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); @@ -2560,9 +2540,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* setup the DDC bus. */ if (IS_SDVOB(sdvo_reg)) - intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); + intel_encoder->i2c_bus = + intel_i2c_create(intel_encoder, + i2c_reg, "SDVOCTRL_E for SDVOB"); else - intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); + intel_encoder->i2c_bus = + intel_i2c_create(intel_encoder, + i2c_reg, "SDVOCTRL_E for SDVOC"); if (!intel_encoder->i2c_bus) goto err_inteloutput; @@ -2583,14 +2567,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* setup the DDC bus. */ if (IS_SDVOB(sdvo_reg)) { - intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); - intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOB/VGA DDC BUS"); + intel_encoder->ddc_bus = + intel_i2c_create(intel_encoder, + ddc_reg, "SDVOB DDC BUS"); + intel_sdvo->analog_ddc_bus = + intel_i2c_create(intel_encoder, + analog_ddc_reg, "SDVOB/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { - intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); - intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOC/VGA DDC BUS"); + intel_encoder->ddc_bus = + intel_i2c_create(intel_encoder, + ddc_reg, "SDVOC DDC BUS"); + intel_sdvo->analog_ddc_bus = + intel_i2c_create(intel_encoder, + analog_ddc_reg, "SDVOC/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) From 2b6efaa47615b29e572d7fc444565db62ca9bcf7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 17:04:02 +0100 Subject: [PATCH 154/476] drm/i915: Remove unused intel_ringbuffer->ring_flag This can always be re-added should somebody find a use... 
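With the flag gone, the flushing-list walk in the hunk below simply keys off the ring pointer itself; schematically:

	/* the object is flushed by this call if it is dirty in one of the
	 * flushed write domains and was last written through this ring */
	if (obj->write_domain & flush_domains &&
	    obj_priv->ring == ring) {
		/* ... existing per-object flush bookkeeping ... */
	}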
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 5 ++--- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 -- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 - 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 02719df418e3..a83574df096e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1580,9 +1580,8 @@ i915_gem_process_flushing_list(struct drm_device *dev, gpu_write_list) { struct drm_gem_object *obj = &obj_priv->base; - if ((obj->write_domain & flush_domains) == - obj->write_domain && - obj_priv->ring->ring_flag == ring->ring_flag) { + if (obj->write_domain & flush_domains && + obj_priv->ring == ring) { uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1ae2b25bf7e4..11bcfc871a0d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -805,7 +805,6 @@ struct intel_ring_buffer render_ring = { .tail = PRB0_TAIL, .start = PRB0_START }, - .ring_flag = I915_EXEC_RENDER, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, .virtual_start = NULL, @@ -843,7 +842,6 @@ struct intel_ring_buffer bsd_ring = { .tail = BSD_RING_TAIL, .start = BSD_RING_START }, - .ring_flag = I915_EXEC_BSD, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, .virtual_start = NULL, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index d3e5f40a8040..fa5d84f85c26 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -16,7 +16,6 @@ struct intel_ring_buffer { u32 tail; u32 start; } regs; - unsigned int ring_flag; unsigned long size; unsigned int alignment; void *virtual_start; From 9d1a903d4ba1033b811ded8d3e5d0f01eeeaaa1d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 17:58:19 +0100 Subject: [PATCH 155/476] drm/i915/sdvo: Tidy intel_sdvo_hdmi_sink_detect Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 55 ++++++++++++++----------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 0e68f9622565..05f5313e14a6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1417,60 +1417,53 @@ enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); - struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - enum drm_connector_status status = connector_status_connected; - struct edid *edid = NULL; + enum drm_connector_status status; + struct edid *edid; edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); - /* This is only applied to SDVO cards with multiple outputs */ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { - uint8_t saved_ddc, temp_ddc; - saved_ddc = intel_sdvo->ddc_bus; - temp_ddc = intel_sdvo->ddc_bus >> 1; + u8 saved_ddc = intel_sdvo->ddc_bus, ddc; + /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ - while(temp_ddc > 1) { - intel_sdvo->ddc_bus = temp_ddc; + for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { + intel_sdvo->ddc_bus = ddc; edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); - if (edid) { - /* - * When we can get the EDID, maybe it is the - * correct DDC bus. 
Update it. - */ - intel_sdvo->ddc_bus = temp_ddc; + if (edid) break; - } - temp_ddc >>= 1; } + + /* + * If we found the EDID on the other bus, maybe that is the + * correct DDC bus. + */ if (edid == NULL) intel_sdvo->ddc_bus = saved_ddc; } - /* when there is no edid and no monitor is connected with VGA - * port, try to use the CRT ddc to read the EDID for DVI-connector + + /* + * When there is no edid and no monitor is connected with VGA + * port, try to use the CRT ddc to read the EDID for DVI-connector. */ - if (edid == NULL && intel_sdvo->analog_ddc_bus && + if (edid == NULL && + intel_sdvo->analog_ddc_bus && !intel_analog_is_connected(connector->dev)) edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); + status = connector_status_disconnected; if (edid != NULL) { - bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); - bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK); - /* DDC bus is shared, match EDID to connector type */ - if (is_digital && need_digital) + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); - else if (is_digital != need_digital) - status = connector_status_disconnected; - + } connector->display_info.raw_edid = NULL; - } else - status = connector_status_disconnected; + kfree(edid); + } - kfree(edid); - return status; } From 819f3fb7fe349d0e6aadbd7088529ab95fe5cd9f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 19:11:56 +0100 Subject: [PATCH 156/476] drm/i915/sdvo: Propagate i2c error from switching DDC control bus. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 05f5313e14a6..fa9dbb72fb46 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -530,8 +530,8 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) * another I2C transaction after issuing the DDC bus switch, it will be * switched to the internal SDVO register. 
*/ -static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, - u8 target) +static int intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, + u8 target) { u8 out_buf[2], cmd_buf[2], ret_value[2], ret; struct i2c_msg msgs[] = { @@ -557,9 +557,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, }; intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, - &target, 1); + &target, 1); /* write the DDC switch command argument */ - intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target); + if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target)) + return -EIO; out_buf[0] = SDVO_I2C_OPCODE; out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; @@ -569,17 +570,20 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, ret_value[1] = 0; ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); + if (ret < 0) + return ret; if (ret != 3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); - return; + return -EIO; } if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("DDC switch command returns response %d\n", - ret_value[0]); - return; + ret_value[0]); + return -EIO; } - return; + + return 0; } static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) @@ -1982,13 +1986,18 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, { struct intel_sdvo *intel_sdvo; const struct i2c_algorithm *algo; + int ret; intel_sdvo = container_of(i2c_adap->algo_data, struct intel_sdvo, base); algo = intel_sdvo->base.i2c_bus->algo; - intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); + ret = intel_sdvo_set_control_bus_switch(intel_sdvo, + intel_sdvo->ddc_bus); + if (ret) + return ret; + return algo->master_xfer(i2c_adap, msgs, num); } From b1c5b0f8cc16a1d22e2e521c4236a6ceca1b2983 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 19:30:13 +0100 Subject: [PATCH 157/476] agp/intel: Remove redundant setting of gtt_mappable_entries Two calls enter, only one will leave. 
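After the cleanup, the mappable-entry count is probed in exactly one place. A condensed sketch of the resulting function, assembled from the hunk below (the GMCH control read moves inside the gen2 branch; the trailing byte-to-entry conversion is not shown in the hunk and is assumed here):

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		/* only gen2 needs the GMCH control word to size the aperture */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the BAR length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;	/* assumed tail, not in the hunk */
}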
Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index dedf05dc433d..791582c73ff7 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -774,18 +774,17 @@ static unsigned int intel_gtt_total_entries(void) static unsigned int intel_gtt_mappable_entries(void) { unsigned int aperture_size; - u16 gmch_ctrl; - - aperture_size = 1024 * 1024; - - pci_read_config_word(intel_private.bridge_dev, - I830_GMCH_CTRL, &gmch_ctrl); if (INTEL_GTT_GEN == 2) { + u16 gmch_ctrl; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) - aperture_size *= 64; + aperture_size = MB(64); else - aperture_size *= 128; + aperture_size = MB(128); } else { /* 9xx supports large sizes, just look at the length */ aperture_size = pci_resource_len(intel_private.pcidev, 2); @@ -799,8 +798,6 @@ static int intel_gtt_init(void) u32 gtt_map_size; int ret; - intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); - ret = intel_private.driver->setup(); if (ret != 0) return ret; From 3f08e4ef807c3103ceebf7993c7463c7a90646f3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Sep 2010 20:15:22 +0100 Subject: [PATCH 158/476] agp/intel: Fix resume regression from 2d2430cf On i915 [EeePCs] something scribles over the registers during suspend and resume so we must save a copy of the PGETBL_CTL register programmed by the BIOS and restore that upon resume. Reported-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 791582c73ff7..ebdeab26ee3c 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -98,6 +98,7 @@ static struct _intel_private { u8 __iomem *registers; phys_addr_t gtt_bus_addr; phys_addr_t gma_bus_addr; + phys_addr_t pte_bus_addr; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; union { @@ -896,11 +897,9 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) static void intel_enable_gtt(void) { - u32 ptetbl_addr, gma_addr; + u32 gma_addr; u16 gmch_ctrl; - ptetbl_addr = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - if (INTEL_GTT_GEN == 2) pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &gma_addr); @@ -914,7 +913,8 @@ static void intel_enable_gtt(void) gmch_ctrl |= I830_GMCH_ENABLED; pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); - writel(ptetbl_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); + writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED, + intel_private.registers+I810_PGETBL_CTL); readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. 
*/ } @@ -930,6 +930,8 @@ static int i830_setup(void) return -ENOMEM; intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; + intel_private.pte_bus_addr = + readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; intel_i830_setup_flush(); @@ -1279,6 +1281,7 @@ static int i9xx_setup(void) if (INTEL_GTT_GEN == 3) { u32 gtt_addr; + pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, >t_addr); intel_private.gtt_bus_addr = gtt_addr; @@ -1298,6 +1301,9 @@ static int i9xx_setup(void) intel_private.gtt_bus_addr = reg_addr + gtt_offset; } + intel_private.pte_bus_addr = + readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; + intel_i9xx_setup_flush(); return 0; From ff482d8317736908e1f803ef94ee5c736a3b8a3a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Sep 2010 10:40:38 +0100 Subject: [PATCH 159/476] drm/i915/sdvo: Only create the analog encoder as required We only need to use the analog encoder for rare devices which share the DDC between the DVI-I and VGA connectors, so only create as needed. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 69 ++++++++++++++++++------------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index fa9dbb72fb46..fbdae4d9220e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -125,9 +125,6 @@ struct intel_sdvo { /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; - /* Mac mini hack -- use the same DDC as the analog connector */ - struct i2c_adapter *analog_ddc_bus; - /* Input timings for adjusted_mode */ struct intel_sdvo_dtd input_dtd; }; @@ -1417,6 +1414,34 @@ intel_analog_is_connected(struct drm_device *dev) return true; } +/* Mac mini hack -- use the same DDC as the analog connector */ +static struct edid * +intel_sdvo_get_analog_edid(struct drm_connector *connector) +{ + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct drm_device *dev = connector->dev; + struct i2c_adapter *ddc; + struct edid *edid; + u32 ddc_reg; + + if (!intel_analog_is_connected(dev)) + return NULL; + + if (HAS_PCH_SPLIT(dev)) + ddc_reg = PCH_GPIOA; + else + ddc_reg = GPIOA; + + ddc = intel_i2c_create(encoder, ddc_reg, "SDVO/VGA DDC BUS"); + if (ddc == NULL) + return NULL; + + edid = drm_get_edid(connector, ddc); + intel_i2c_destroy(ddc); + + return edid; +} + enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { @@ -1452,10 +1477,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. */ - if (edid == NULL && - intel_sdvo->analog_ddc_bus && - !intel_analog_is_connected(connector->dev)) - edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); status = connector_status_disconnected; if (edid != NULL) { @@ -1522,10 +1545,13 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct edid *edid; int num_modes; /* set the bus switch and get the modes */ num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); + if (num_modes) + return; /* * Mac mini hack. 
On this device, the DVI-I connector shares one DDC @@ -1533,12 +1559,12 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ - if (num_modes == 0 && - intel_sdvo->analog_ddc_bus && - !intel_analog_is_connected(connector->dev)) { - /* Switch to the analog ddc bus and try that - */ - (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); + edid = intel_sdvo_get_analog_edid(connector); + if (edid != NULL) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + connector->display_info.raw_edid = NULL; + kfree(edid); } } @@ -1898,9 +1924,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); - if (intel_sdvo->analog_ddc_bus) - intel_i2c_destroy(intel_sdvo->analog_ddc_bus); - if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); @@ -2519,7 +2542,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) struct intel_sdvo *intel_sdvo; u8 ch[0x40]; int i; - u32 i2c_reg, ddc_reg, analog_ddc_reg; + u32 i2c_reg, ddc_reg; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) @@ -2533,11 +2556,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (HAS_PCH_SPLIT(dev)) { i2c_reg = PCH_GPIOE; ddc_reg = PCH_GPIOE; - analog_ddc_reg = PCH_GPIOA; } else { i2c_reg = GPIOE; ddc_reg = GPIOE; - analog_ddc_reg = GPIOA; } /* setup the DDC bus. */ @@ -2572,20 +2593,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, ddc_reg, "SDVOB DDC BUS"); - intel_sdvo->analog_ddc_bus = - intel_i2c_create(intel_encoder, - analog_ddc_reg, "SDVOB/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, ddc_reg, "SDVOC DDC BUS"); - intel_sdvo->analog_ddc_bus = - intel_i2c_create(intel_encoder, - analog_ddc_reg, "SDVOC/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } - if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) + if (intel_encoder->ddc_bus == NULL) goto err_i2c; /* Wrap with our custom algo which switches to DDC mode */ @@ -2638,8 +2653,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) err_enc: drm_encoder_cleanup(&intel_encoder->base); err_i2c: - if (intel_sdvo->analog_ddc_bus != NULL) - intel_i2c_destroy(intel_sdvo->analog_ddc_bus); if (intel_encoder->ddc_bus != NULL) intel_i2c_destroy(intel_encoder->ddc_bus); if (intel_encoder->i2c_bus != NULL) From 2f551c84563df2bf144a819993b2d729c66583ee Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Sep 2010 10:42:50 +0100 Subject: [PATCH 160/476] drm/i915/sdvo: Mark the status as unknown if attached with EDID One problem with devices that share the DDC bus between the VGA and DVI-I connectors is that with two devices attached we cannot know if there is truly a monitor attached to the DVI connector. In this case, it is preferrrable to mark the status as unknown, so that the user can supply the known set of modes and continue to use the output. 
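In effect, the EDID-matching tail of the HDMI sink detect becomes the following (a sketch built from this one-line change plus the detect code reworked earlier in the series, not a verbatim extract):

	/* shared DDC: without an EDID we cannot tell whether a DVI monitor
	 * is present, so report unknown rather than disconnected */
	status = connector_status_unknown;
	if (edid != NULL) {
		/* DDC bus is shared, match EDID to connector type */
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
		}
		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	return status;

Reporting unknown rather than disconnected lets the user supply a mode list by hand and keep using the output when detection is ambiguous.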
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index fbdae4d9220e..d2b4a6a28405 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1480,7 +1480,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); - status = connector_status_disconnected; + status = connector_status_unknown; if (edid != NULL) { /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { From 373a3cf744c774478f44921c50011b896ab08f9d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Sep 2010 12:03:59 +0100 Subject: [PATCH 161/476] drm/i915: call drm_encoder_init first Later initialisation of the encoder often requires that drm_encoder_init() has already been called, for instance, initialiasing the DDC buses. Yet another recent regression, as 819f3fb7 depended upon these fixes which I missed when cherry-picking. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 4 ++++ drivers/gpu/drm/i915/intel_dvo.c | 5 +++-- drivers/gpu/drm/i915/intel_hdmi.c | 6 ++++-- drivers/gpu/drm/i915/intel_sdvo.c | 15 +++++++-------- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 0403ec9e164a..2353da625d25 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -548,6 +548,10 @@ void intel_crt_init(struct drm_device *dev) if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); + drm_connector_cleanup(&intel_connector->base); + kfree(intel_connector); + drm_encoder_cleanup(&intel_encoder->base); + kfree(intel_encoder); return; } diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7de7d1a68c07..d8a586b41275 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -360,6 +360,8 @@ void intel_dvo_init(struct drm_device *dev) } intel_encoder = &intel_dvo->base; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type); /* Set up the DDC bus */ intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, @@ -428,8 +430,6 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type); drm_encoder_helper_add(&intel_encoder->base, &intel_dvo_helper_funcs); @@ -456,6 +456,7 @@ void intel_dvo_init(struct drm_device *dev) if (i2cbus != NULL) intel_i2c_destroy(i2cbus); free_intel: + drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dvo); kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 93d5b61bf5bd..783924c7682a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -228,6 +228,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) } intel_encoder = &intel_hdmi->base; + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); @@ -272,8 +275,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) intel_hdmi->sdvox_reg = sdvox_reg; - 
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_connector_attach_encoder(intel_connector, intel_encoder); @@ -291,6 +292,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) return; err_connector: + drm_encoder_cleanup(&intel_encoder->base); drm_connector_cleanup(connector); kfree(intel_hdmi); kfree(intel_connector); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d2b4a6a28405..f7030e481083 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2552,6 +2552,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; + /* encoder type will be decided later */ + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); if (HAS_PCH_SPLIT(dev)) { i2c_reg = PCH_GPIOE; @@ -2606,31 +2608,29 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* Wrap with our custom algo which switches to DDC mode */ intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; - /* encoder type will be decided later */ - drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) - goto err_enc; + goto err_i2c; if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_enc; + goto err_i2c; } intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) - goto err_enc; + goto err_i2c; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) - goto err_enc; + goto err_i2c; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " @@ -2650,14 +2650,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); return true; -err_enc: - drm_encoder_cleanup(&intel_encoder->base); err_i2c: if (intel_encoder->ddc_bus != NULL) intel_i2c_destroy(intel_encoder->ddc_bus); if (intel_encoder->i2c_bus != NULL) intel_i2c_destroy(intel_encoder->i2c_bus); err_inteloutput: + drm_encoder_cleanup(&intel_encoder->base); kfree(intel_sdvo); return false; From f899fc64cda8569d0529452aafc0da31c042df2e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jul 2010 15:44:45 -0700 Subject: [PATCH 162/476] drm/i915: use GMBUS to manage i2c links Use the GMBUS interface rather than direct bit banging to grab the EDID over DDC (and for other forms of auxiliary communication with external display controllers). The hope is that this method will be much faster and more reliable than bit banging for fetching EDIDs from buggy monitors or through switches, though we still preserve the bit banging as a fallback in case GMBUS fails. Based on an original patch by Jesse Barnes. 
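For orientation, a single GMBUS read roughly follows the sequence below. This is an illustrative sketch using only the register bits this patch adds to i915_reg.h (see further down); it is not the driver's transfer routine, and the helper name, parameters and error handling are invented for the example. PCH register offsets, poll timeouts and the bit-bang fallback are omitted.

/* hypothetical helper, for illustration only */
static int gmbus_read_example(struct drm_i915_private *dev_priv,
			      int port, u8 slave_addr, u8 *buf, int len)
{
	/* select the pin pair and a conservative clock rate */
	I915_WRITE(GMBUS0, GMBUS_RATE_100KHZ | port);

	/* start a read cycle of 'len' bytes, stopping when done */
	I915_WRITE(GMBUS1,
		   GMBUS_SW_RDY | GMBUS_ENT |
		   GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
		   (len << GMBUS_BYTE_COUNT_SHIFT) |
		   (slave_addr << GMBUS_SLAVE_ADDR_SHIFT) |
		   GMBUS_SLAVE_READ);

	while (len) {
		u32 status, val;
		int burst;

		/* wait for the next (up to) four bytes to arrive; a real
		 * implementation bounds this poll with a timeout */
		do {
			status = I915_READ(GMBUS2);
			if (status & GMBUS_SATOER)
				return -ENXIO;	/* no acknowledge from slave */
		} while ((status & GMBUS_HW_RDY) == 0);

		/* GMBUS3 presents up to four data bytes per ready phase */
		val = I915_READ(GMBUS3);
		for (burst = 0; burst < 4 && len; burst++, len--) {
			*buf++ = val & 0xff;
			val >>= 8;
		}
	}

	return 0;
}

As the message says, the GPIO bit-banging path is preserved as a fallback, so a GMBUS failure is retried over the old algorithm rather than failing the transfer outright.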
Cc: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/drm_edid.c | 1 - drivers/gpu/drm/i915/dvo_ch7017.c | 9 +- drivers/gpu/drm/i915/dvo_ch7xxx.c | 10 +- drivers/gpu/drm/i915/dvo_ivch.c | 10 +- drivers/gpu/drm/i915/dvo_sil164.c | 10 +- drivers/gpu/drm/i915/dvo_tfp410.c | 10 +- drivers/gpu/drm/i915/i915_dma.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 14 +- drivers/gpu/drm/i915/i915_reg.h | 51 +++- drivers/gpu/drm/i915/i915_suspend.c | 4 +- drivers/gpu/drm/i915/intel_bios.c | 16 +- drivers/gpu/drm/i915/intel_crt.c | 48 +--- drivers/gpu/drm/i915/intel_display.c | 6 - drivers/gpu/drm/i915/intel_dp.c | 3 +- drivers/gpu/drm/i915/intel_drv.h | 19 +- drivers/gpu/drm/i915/intel_dvo.c | 33 +-- drivers/gpu/drm/i915/intel_hdmi.c | 38 +-- drivers/gpu/drm/i915/intel_i2c.c | 387 +++++++++++++++++++-------- drivers/gpu/drm/i915/intel_lvds.c | 16 +- drivers/gpu/drm/i915/intel_modes.c | 16 +- drivers/gpu/drm/i915/intel_sdvo.c | 163 ++++------- 21 files changed, 444 insertions(+), 422 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 96e963108225..fd033ebbdf84 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "drmP.h" #include "drm_edid.h" #include "drm_edid_modes.h" diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 14d59804acd7..0bc8ce1ad9aa 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -168,7 +168,6 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) { struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -190,7 +189,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *val= in_buf[0]; return true; }; @@ -201,7 +200,6 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) { struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -213,7 +211,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) out_buf[0] = addr; out_buf[1] = val; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; return false; @@ -223,7 +221,6 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) static bool ch7017_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); struct ch7017_priv *priv; uint8_t val; @@ -242,7 +239,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, val != CH7019_DEVICE_ID_VALUE) { DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " "Slave %d.\n", - val, i2cbus->adapter.name,dvo->slave_addr); + val, adapter->name,dvo->slave_addr); goto fail; } diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 6f1944b24441..7eaa94e4ff06 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -113,7 +113,6 @@ static bool 
ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct ch7xxx_priv *ch7xxx= dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index a2ec3f487202..a12ed9414cc7 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[1]; u8 in_buf[2]; @@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) out_buf[0] = addr; - if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) { + if (i2c_transfer(adapter, msgs, 3) == 3) { *data = (in_buf[1] << 8) | in_buf[0]; return true; }; @@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) if (!priv->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from " "%s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[3]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) out_buf[1] = data & 0xff; out_buf[2] = data >> 8; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!priv->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 9b8e6765cf26..e4b4091df942 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ 
-69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct sil164_priv *sil= dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 56f66426207f..8ab2855bb544 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7c7d1bc9d1be..39aaffe79583 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2001,6 +2001,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Try to make 
sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); + intel_setup_gmbus(dev); intel_opregion_setup(dev); i915_gem_load(dev); @@ -2155,6 +2156,7 @@ int i915_driver_unload(struct drm_device *dev) intel_cleanup_overlay(dev); } + intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b0692c40b0c7..cf08128798a7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,6 +34,7 @@ #include "intel_bios.h" #include "intel_ringbuffer.h" #include +#include #include /* General customization: @@ -246,6 +247,12 @@ typedef struct drm_i915_private { void __iomem *regs; + struct intel_gmbus { + struct i2c_adapter adapter; + struct i2c_adapter *force_bitbanging; + int pin; + } *gmbus; + struct pci_dev *bridge_dev; struct intel_ring_buffer render_ring; struct intel_ring_buffer bsd_ring; @@ -339,7 +346,7 @@ typedef struct drm_i915_private { struct notifier_block lid_notifier; - int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */ + int crt_ddc_pin; struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ @@ -1070,6 +1077,11 @@ extern int i915_restore_state(struct drm_device *dev); extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); +/* intel_i2c.c */ +extern int intel_setup_gmbus(struct drm_device *dev); +extern void intel_teardown_gmbus(struct drm_device *dev); +extern void intel_i2c_reset(struct drm_device *dev); + /* intel_opregion.c */ extern int intel_opregion_setup(struct drm_device *dev); #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fd229abe0d86..18e3749fbd11 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -583,12 +583,51 @@ # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) -#define GMBUS0 0x5100 -#define GMBUS1 0x5104 -#define GMBUS2 0x5108 -#define GMBUS3 0x510c -#define GMBUS4 0x5110 -#define GMBUS5 0x5120 +#define GMBUS0 0x5100 /* clock/port select */ +#define GMBUS_RATE_100KHZ (0<<8) +#define GMBUS_RATE_50KHZ (1<<8) +#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_PORT_DISABLED 0 +#define GMBUS_PORT_SSC 1 +#define GMBUS_PORT_VGADDC 2 +#define GMBUS_PORT_PANEL 3 +#define GMBUS_PORT_DPC 4 /* HDMIC */ +#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ + /* 6 reserved */ +#define GMBUS_PORT_DPD 7 /* HDMID */ +#define GMBUS_NUM_PORTS 8 +#define GMBUS1 0x5104 /* command/status */ +#define GMBUS_SW_CLR_INT (1<<31) +#define GMBUS_SW_RDY (1<<30) +#define GMBUS_ENT (1<<29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0<<25) +#define GMBUS_CYCLE_WAIT (1<<25) +#define GMBUS_CYCLE_INDEX (2<<25) +#define GMBUS_CYCLE_STOP (4<<25) +#define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_SLAVE_INDEX_SHIFT 8 +#define GMBUS_SLAVE_ADDR_SHIFT 1 +#define GMBUS_SLAVE_READ (1<<0) +#define GMBUS_SLAVE_WRITE (0<<0) +#define GMBUS2 0x5108 /* status */ +#define GMBUS_INUSE (1<<15) +#define GMBUS_HW_WAIT_PHASE (1<<14) +#define GMBUS_STALL_TIMEOUT (1<<13) +#define GMBUS_INT (1<<12) +#define GMBUS_HW_RDY (1<<11) +#define GMBUS_SATOER (1<<10) +#define GMBUS_ACTIVE (1<<9) +#define GMBUS3 0x510c /* data buffer bytes 
3-0 */ +#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ +#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) +#define GMBUS_NAK_EN (1<<3) +#define GMBUS_IDLE_EN (1<<2) +#define GMBUS_HW_WAIT_EN (1<<1) +#define GMBUS_HW_RDY_EN (1<<0) +#define GMBUS5 0x5120 /* byte index */ +#define GMBUS_2BYTE_INDEX_EN (1<<31) /* * Clock control & power management diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2c6b98f2440e..5c0de6501828 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -860,9 +860,7 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); - /* I2C state */ - intel_i2c_reset_gmbus(dev); + intel_i2c_reset(dev); return 0; } - diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8986a4b898db..d11bbcad4fea 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -291,14 +291,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *general; - const int crt_bus_map_table[] = { - GPIOB, - GPIOA, - GPIOC, - GPIOD, - GPIOE, - GPIOF, - }; general = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (general) { @@ -306,10 +298,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); - if ((bus_pin >= 1) && (bus_pin <= 6)) { - dev_priv->crt_ddc_bus = - crt_bus_map_table[bus_pin-1]; - } + if (bus_pin >= 1 && bus_pin <= 6) + dev_priv->crt_ddc_pin = bus_pin - 1; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", block_size); @@ -533,6 +523,8 @@ intel_init_bios(struct drm_device *dev) struct bdb_header *bdb = NULL; u8 __iomem *bios = NULL; + dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; + /* XXX Should this validation be moved to intel_opregion.c? */ if (dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2353da625d25..8b782ee63085 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -264,12 +264,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) static bool intel_crt_detect_ddc(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = encoder->dev->dev_private; /* CRT should always be at 0, but check anyway */ if (intel_encoder->type != INTEL_OUTPUT_ANALOG) return false; - return intel_ddc_probe(intel_encoder); + return intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin); } static enum drm_connector_status @@ -445,29 +446,18 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { - struct intel_encoder *encoder = intel_attached_encoder(connector); - struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ret = intel_ddc_get_modes(connector, encoder->ddc_bus); + ret = intel_ddc_get_modes(connector, + &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); if (ret || !IS_G4X(dev)) - goto end; + return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ - ddc_bus = intel_i2c_create(encoder, GPIOD, "CRTDDC_D"); - if (!ddc_bus) { - dev_printk(KERN_ERR, &connector->dev->pdev->dev, - "DDC bus registration failed for CRTDDC_D.\n"); - goto end; - } - /* Try to get modes by GPIOD port */ - ret = intel_ddc_get_modes(connector, ddc_bus); - intel_i2c_destroy(ddc_bus); - -end: - return ret; - + return intel_ddc_get_modes(connector, + &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); } static int intel_crt_set_property(struct drm_connector *connector, @@ -513,7 +503,6 @@ void intel_crt_init(struct drm_device *dev) struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; - u32 i2c_reg; intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); if (!intel_encoder) @@ -534,27 +523,6 @@ void intel_crt_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, intel_encoder); - /* Set up the DDC bus. */ - if (HAS_PCH_SPLIT(dev)) - i2c_reg = PCH_GPIOA; - else { - i2c_reg = GPIOA; - /* Use VBT information for CRT DDC if available */ - if (dev_priv->crt_ddc_bus != 0) - i2c_reg = dev_priv->crt_ddc_bus; - } - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - i2c_reg, "CRTDDC_A"); - if (!intel_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); - drm_connector_cleanup(&intel_connector->base); - kfree(intel_connector); - drm_encoder_cleanup(&intel_encoder->base); - kfree(intel_encoder); - return; - } - intel_encoder->type = INTEL_OUTPUT_ANALOG; intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT) | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 11d643acf2fa..86ea3890aa8a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2530,12 +2530,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); - - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); - drm_encoder_cleanup(encoder); kfree(intel_encoder); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 208a4ec3e432..9a87ec5175e6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1490,7 +1490,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) /* We should parse the EDID data and find out if it has an audio sink */ - ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); + ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && !dev_priv->panel_fixed_mode) { @@ -1705,7 +1705,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp_i2c_init(intel_dp, intel_connector, name); - intel_encoder->ddc_bus = &intel_dp->adapter; intel_encoder->hot_plug = intel_dp_hot_plug; if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8fe6b730c679..60ce9305e772 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -26,8 +26,6 @@ #define __INTEL_DRV_H__ #include -#include -#include #include "i915_drv.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" @@ -127,13 +125,6 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> 
INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; } -struct intel_i2c_chan { - struct intel_encoder *encoder; - u32 reg; /* GPIO reg */ - struct i2c_adapter adapter; - struct i2c_algo_bit_data algo; -}; - struct intel_framebuffer { struct drm_framebuffer base; struct drm_gem_object *obj; @@ -149,8 +140,6 @@ struct intel_fbdev { struct intel_encoder { struct drm_encoder base; int type; - struct i2c_adapter *i2c_bus; - struct i2c_adapter *ddc_bus; bool load_detect_temp; bool needs_tv_clock; void (*hot_plug)(struct intel_encoder *); @@ -206,14 +195,8 @@ struct intel_unpin_work { bool enable_stall_check; }; -struct i2c_adapter *intel_i2c_create(struct intel_encoder *encoder, - const u32 reg, - const char *name); -void intel_i2c_destroy(struct i2c_adapter *adapter); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); -extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); -void intel_i2c_quirk_set(struct drm_device *dev, bool enable); -void intel_i2c_reset_gmbus(struct drm_device *dev); +extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index d8a586b41275..1ee0dbbf6ee1 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .name = "ch7017", .dvo_reg = DVOC, .slave_addr = 0x75, - .gpio = GPIOE, + .gpio = GMBUS_PORT_DPD, .dev_ops = &ch7017_ops, } }; @@ -81,6 +81,7 @@ struct intel_dvo { struct intel_encoder base; struct intel_dvo_device dev; + int ddc_bus; struct drm_display_mode *panel_fixed_mode; bool panel_wants_dither; @@ -235,13 +236,15 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto static int intel_dvo_get_modes(struct drm_connector *connector) { struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. 
*/ - intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); + intel_ddc_get_modes(connector, + &dev_priv->gmbus[intel_dvo->ddc_bus].adapter); if (!list_empty(&connector->probed_modes)) return 1; @@ -341,10 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector) void intel_dvo_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; - struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; @@ -364,15 +367,13 @@ void intel_dvo_init(struct drm_device *dev) &intel_dvo_enc_funcs, encoder_type); /* Set up the DDC bus */ - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - GPIOD, "DVODDC_D"); - if (!intel_encoder->ddc_bus) - goto free_intel; + intel_dvo->ddc_bus = GMBUS_PORT_DPB; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; + struct i2c_adapter *i2c; int gpio; /* Allow the I2C driver info to specify the GPIO to be used in @@ -382,23 +383,18 @@ void intel_dvo_init(struct drm_device *dev) if (dvo->gpio != 0) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) - gpio = GPIOB; + gpio = GMBUS_PORT_PANEL; else - gpio = GPIOE; + gpio = GMBUS_PORT_DPD; /* Set up the I2C bus necessary for the chip we're probing. * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); - i2cbus = intel_i2c_create(intel_encoder, gpio, - gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"); - if (i2cbus == NULL) - continue; + i2c = &dev_priv->gmbus[gpio].adapter; intel_dvo->dev = *dvo; - ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); + ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); if (!ret) continue; @@ -451,11 +447,6 @@ void intel_dvo_init(struct drm_device *dev) return; } - intel_i2c_destroy(intel_encoder->ddc_bus); - /* Didn't find a chip, so tear down. */ - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); -free_intel: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dvo); kfree(intel_connector); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 783924c7682a..f814cb035e01 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -40,6 +40,7 @@ struct intel_hdmi { struct intel_encoder base; u32 sdvox_reg; + int ddc_bus; bool has_hdmi_sink; }; @@ -148,11 +149,13 @@ static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct edid *edid = NULL; + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct edid *edid; enum drm_connector_status status = connector_status_disconnected; intel_hdmi->has_hdmi_sink = false; - edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); + edid = drm_get_edid(connector, + &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { @@ -169,12 +172,14 @@ intel_hdmi_detect(struct drm_connector *connector) static int intel_hdmi_get_modes(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. 
*/ - return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); + return intel_ddc_get_modes(connector, + &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -246,32 +251,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) /* Set up the DDC bus. */ if (sdvox_reg == SDVOB) { intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - GPIOE, "HDMIB"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - GPIOD, "HDMIC"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - PCH_GPIOE, "HDMIB"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - PCH_GPIOD, "HDMIC"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - PCH_GPIOF, "HDMID"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPD; dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } - if (!intel_encoder->ddc_bus) - goto err_connector; intel_hdmi->sdvox_reg = sdvox_reg; @@ -288,14 +286,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } - - return; - -err_connector: - drm_encoder_cleanup(&intel_encoder->base); - drm_connector_cleanup(connector); - kfree(intel_hdmi); - kfree(intel_connector); - - return; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d3d65a9cfba1..6f4d128935ac 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2006 Dave Airlie - * Copyright © 2006-2008 Intel Corporation + * Copyright © 2006-2008,2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,10 +24,9 @@ * * Authors: * Eric Anholt + * Chris Wilson */ #include -#include -#include #include #include "drmP.h" #include "drm.h" @@ -35,13 +34,33 @@ #include "i915_drm.h" #include "i915_drv.h" -void intel_i2c_quirk_set(struct drm_device *dev, bool enable) +/* Intel GPIO access functions */ + +#define I2C_RISEFALL_TIME 20 + +struct intel_gpio { + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo; + struct drm_i915_private *dev_priv; + u32 reg; +}; + +void +intel_i2c_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(PCH_GMBUS0, 0); + else + I915_WRITE(GMBUS0, 0); +} + +static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) +{ u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ - if (!IS_PINEVIEW(dev)) + if (!IS_PINEVIEW(dev_priv->dev)) return; val = I915_READ(DSPCLK_GATE_D); @@ -52,42 +71,30 @@ void intel_i2c_quirk_set(struct 
drm_device *dev, bool enable) I915_WRITE(DSPCLK_GATE_D, val); } -/* - * Intel GPIO access functions - */ - -#define I2C_RISEFALL_TIME 20 - -static inline struct drm_i915_private * -get_dev_priv(struct intel_i2c_chan *chan) -{ - return chan->encoder->base.dev->dev_private; -} - static int get_clock(void *data) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = get_dev_priv(chan); - return (I915_READ(chan->reg) & GPIO_CLOCK_VAL_IN) != 0; + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = get_dev_priv(chan); - return (I915_READ(chan->reg) & GPIO_DATA_VAL_IN) != 0; + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = get_dev_priv(chan); + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = 0, clock_bits; /* On most chips, these bits must be preserved in software. */ if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | + reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); if (state_high) @@ -95,20 +102,21 @@ static void set_clock(void *data, int state_high) else clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - I915_WRITE(chan->reg, reserved | clock_bits); - POSTING_READ(chan->reg); + + I915_WRITE(gpio->reg, reserved | clock_bits); + POSTING_READ(gpio->reg); } static void set_data(void *data, int state_high) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = get_dev_priv(chan); + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = 0, data_bits; /* On most chips, these bits must be preserved in software. */ if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | + reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); if (state_high) @@ -117,111 +125,258 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - I915_WRITE(chan->reg, reserved | data_bits); - POSTING_READ(chan->reg); + I915_WRITE(gpio->reg, reserved | data_bits); + POSTING_READ(gpio->reg); } -/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C - * engine, but if the BIOS leaves it enabled, then that can break our use - * of the bit-banging I2C interfaces. This is notably the case with the - * Mac Mini in EFI mode. 
- */ -void -intel_i2c_reset_gmbus(struct drm_device *dev) +static struct i2c_adapter * +intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) { - struct drm_i915_private *dev_priv = dev->dev_private; + static const int map_pin_to_reg[] = { + 0, + GPIOB, + GPIOA, + GPIOC, + GPIOD, + GPIOE, + GPIOF, + }; + struct intel_gpio *gpio; - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_GMBUS0, 0); - else - I915_WRITE(GMBUS0, 0); -} + if (pin < 1 || pin > 7) + return NULL; -/** - * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg - * @dev: DRM device - * @output: driver specific output device - * @reg: GPIO reg to use - * @name: name for this bus - * @slave_addr: slave address (if fixed) - * - * Creates and registers a new i2c bus with the Linux i2c layer, for use - * in output probing and control (e.g. DDC or SDVO control functions). - * - * Possible values for @reg include: - * %GPIOA - * %GPIOB - * %GPIOC - * %GPIOD - * %GPIOE - * %GPIOF - * %GPIOG - * %GPIOH - * see PRM for details on how these different busses are used. - */ -struct i2c_adapter *intel_i2c_create(struct intel_encoder *encoder, - const u32 reg, - const char *name) -{ - struct intel_i2c_chan *chan; - struct drm_device *dev = encoder->base.dev; + gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); + if (gpio == NULL) + return NULL; - chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); - if (!chan) + gpio->reg = map_pin_to_reg[pin]; + if (HAS_PCH_SPLIT(dev_priv->dev)) + gpio->reg += PCH_GPIOA - GPIOA; + gpio->dev_priv = dev_priv; + + snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO %d", pin); + gpio->adapter.owner = THIS_MODULE; + gpio->adapter.algo_data = &gpio->algo; + gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; + gpio->algo.setsda = set_data; + gpio->algo.setscl = set_clock; + gpio->algo.getsda = get_data; + gpio->algo.getscl = get_clock; + gpio->algo.udelay = I2C_RISEFALL_TIME; + gpio->algo.timeout = usecs_to_jiffies(2200); + gpio->algo.data = gpio; + + if (i2c_bit_add_bus(&gpio->adapter)) goto out_free; - chan->encoder = encoder; - chan->reg = reg; - snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); - chan->adapter.owner = THIS_MODULE; - chan->adapter.algo_data = &chan->algo; - chan->adapter.dev.parent = &dev->pdev->dev; - chan->algo.setsda = set_data; - chan->algo.setscl = set_clock; - chan->algo.getsda = get_data; - chan->algo.getscl = get_clock; - chan->algo.udelay = I2C_RISEFALL_TIME; - chan->algo.timeout = usecs_to_jiffies(2200); - chan->algo.data = chan; - - i2c_set_adapdata(&chan->adapter, chan); - - if (i2c_bit_add_bus(&chan->adapter)) - goto out_free; - - intel_i2c_reset_gmbus(dev); + intel_i2c_reset(dev_priv->dev); /* JJJ: raise SCL and SDA? */ - intel_i2c_quirk_set(dev, true); - set_data(chan, 1); + intel_i2c_quirk_set(dev_priv, true); + set_data(gpio, 1); udelay(I2C_RISEFALL_TIME); - set_clock(chan, 1); + set_clock(gpio, 1); udelay(I2C_RISEFALL_TIME); - intel_i2c_quirk_set(dev, false); + intel_i2c_quirk_set(dev_priv, false); - return &chan->adapter; + return &gpio->adapter; out_free: - kfree(chan); + kfree(gpio); return NULL; } -/** - * intel_i2c_destroy - unregister and free i2c bus resources - * @output: channel to free - * - * Unregister the adapter from the i2c layer, then free the structure. 
- */ -void intel_i2c_destroy(struct i2c_adapter *adapter) +static int +quirk_i2c_transfer(struct drm_i915_private *dev_priv, + struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) { - struct intel_i2c_chan *chan; + int ret; - if (!adapter) + intel_i2c_reset(dev_priv->dev); + + intel_i2c_quirk_set(dev_priv, true); + ret = i2c_transfer(adapter, msgs, num); + intel_i2c_quirk_set(dev_priv, false); + + return ret; +} + +static int +gmbus_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = adapter->algo_data; + int i, speed, reg_offset; + + if (bus->force_bitbanging) + return quirk_i2c_transfer(dev_priv, bus->force_bitbanging, msgs, num); + + reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; + + speed = GMBUS_RATE_100KHZ; + if (INTEL_INFO(dev_priv->dev)->gen > 4 || IS_G4X(dev_priv->dev)) { + if (bus->pin == GMBUS_PORT_DPB) /* SDVO only? */ + speed = GMBUS_RATE_1MHZ; + else + speed = GMBUS_RATE_400KHZ; + } + I915_WRITE(GMBUS0 + reg_offset, speed | bus->pin); + + for (i = 0; i < num; i++) { + u16 len = msgs[i].len; + u8 *buf = msgs[i].buf; + + if (msgs[i].flags & I2C_M_RD) { + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | + (len << GMBUS_BYTE_COUNT_SHIFT) | + (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + do { + u32 val, loop = 0; + + if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + + val = I915_READ(GMBUS3 + reg_offset); + do { + *buf++ = val & 0xff; + val >>= 8; + } while (--len && ++loop < 4); + } while (len); + } else { + u32 val = 0, loop = 0; + + BUG_ON(msgs[i].len > 4); + + do { + val |= *buf++ << (loop*8); + } while (--len && +loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT ) | + (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | + (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + } + + if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + } + + return num; + +timeout: + DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d\n", bus->pin); + /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. 
*/ + bus->force_bitbanging = intel_gpio_create(dev_priv, bus->pin); + if (!bus->force_bitbanging) + return -ENOMEM; + + return quirk_i2c_transfer(dev_priv, bus->force_bitbanging, msgs, num); +} + +static u32 gmbus_func(struct i2c_adapter *adapter) +{ + return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + /* I2C_FUNC_10BIT_ADDR | */ + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL); +} + +static const struct i2c_algorithm gmbus_algorithm = { + .master_xfer = gmbus_xfer, + .functionality = gmbus_func +}; + +/** + * intel_gmbus_setup - instantiate all Intel i2c GMBuses + * @dev: DRM device + */ +int intel_setup_gmbus(struct drm_device *dev) +{ + static const char *names[] = { + "disabled", + "ssc", + "vga", + "panel", + "dpc", + "dpb", + "dpd", + "reserved" + }; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i; + + dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS, + GFP_KERNEL); + if (dev_priv->gmbus == NULL) + return -ENOMEM; + + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + + bus->adapter.owner = THIS_MODULE; + bus->adapter.class = I2C_CLASS_DDC; + snprintf(bus->adapter.name, + I2C_NAME_SIZE, + "gmbus %s", + names[i]); + + bus->adapter.dev.parent = &dev->pdev->dev; + bus->adapter.algo_data = dev_priv; + + bus->adapter.algo = &gmbus_algorithm; + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; + + bus->pin = i; + } + + intel_i2c_reset(dev_priv->dev); + + return 0; + +err: + while (--i) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + i2c_del_adapter(&bus->adapter); + } + kfree(dev_priv->gmbus); + dev_priv->gmbus = NULL; + return ret; +} + +void intel_teardown_gmbus(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (dev_priv->gmbus == NULL) return; - chan = container_of(adapter, - struct intel_i2c_chan, - adapter); - i2c_del_adapter(&chan->adapter); - kfree(chan); + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + if (bus->force_bitbanging) { + i2c_del_adapter(bus->force_bitbanging); + kfree(bus->force_bitbanging); + } + i2c_del_adapter(&bus->adapter); + } + + kfree(dev_priv->gmbus); + dev_priv->gmbus = NULL; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2ff4a5cb2d56..9177c17853e5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -474,11 +474,12 @@ static int intel_lvds_get_modes(struct drm_connector *connector) { struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *mode; if (intel_lvds->edid_good) { int ret = intel_ddc_get_modes(connector, - intel_lvds->base.ddc_bus); + &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter); if (ret) return ret; } @@ -898,21 +899,12 @@ void intel_lvds_init(struct drm_device *dev) * if closed, act like it's not there for now */ - /* Set up the DDC bus. */ - intel_encoder->ddc_bus = intel_i2c_create(intel_encoder, - gpio, "LVDSDDC_C"); - if (!intel_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); - goto failed; - } - /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. 
*/ intel_lvds->edid_good = true; - if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) + if (!intel_ddc_get_modes(connector, &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter)) intel_lvds->edid_good = false; if (!intel_lvds->edid_good) { @@ -999,8 +991,6 @@ out: failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); kfree(intel_lvds); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 1138aa98573d..f70b7cf32bff 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2007 Dave Airlie - * Copyright (c) 2007 Intel Corporation + * Copyright (c) 2007, 2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a @@ -34,11 +34,11 @@ * intel_ddc_probe * */ -bool intel_ddc_probe(struct intel_encoder *intel_encoder) +bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) { + struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; u8 out_buf[] = { 0x0, 0x0}; u8 buf[2]; - int ret; struct i2c_msg msgs[] = { { .addr = 0x50, @@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) } }; - intel_i2c_quirk_set(intel_encoder->base.dev, true); - ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); - intel_i2c_quirk_set(intel_encoder->base.dev, false); - if (ret == 2) - return true; - - return false; + return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; } /** @@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct edid *edid; int ret = 0; - intel_i2c_quirk_set(connector->dev, true); edid = drm_get_edid(connector, adapter); - intel_i2c_quirk_set(connector->dev, false); if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index f7030e481083..2b3b4754c97d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -65,6 +65,7 @@ static const char *tv_format_names[] = { struct intel_sdvo { struct intel_encoder base; + struct i2c_adapter *i2c; u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ @@ -264,7 +265,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) }; int ret; - if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -286,7 +287,7 @@ static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch } }; - return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1; + return i2c_transfer(intel_sdvo->i2c, msgs, 1) == 1; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} @@ -566,7 +567,7 @@ static int intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, ret_value[0] = 0; ret_value[1] = 0; - ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); + ret = i2c_transfer(intel_sdvo->i2c, msgs, 3); if (ret < 0) return ret; if (ret != 3) { @@ -1375,6 +1376,19 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) return (caps > 1); } +static struct edid * +intel_sdvo_get_edid(struct drm_connector *connector, int ddc) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + int ret; + + ret = intel_sdvo_set_control_bus_switch(intel_sdvo, ddc); + if (ret) + 
return NULL; + + return drm_get_edid(connector, intel_sdvo->i2c); +} + static struct drm_connector * intel_find_analog_connector(struct drm_device *dev) { @@ -1418,28 +1432,12 @@ intel_analog_is_connected(struct drm_device *dev) static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct intel_encoder *encoder = intel_attached_encoder(connector); - struct drm_device *dev = connector->dev; - struct i2c_adapter *ddc; - struct edid *edid; - u32 ddc_reg; + struct drm_i915_private *dev_priv = connector->dev->dev_private; - if (!intel_analog_is_connected(dev)) + if (!intel_analog_is_connected(connector->dev)) return NULL; - if (HAS_PCH_SPLIT(dev)) - ddc_reg = PCH_GPIOA; - else - ddc_reg = GPIOA; - - ddc = intel_i2c_create(encoder, ddc_reg, "SDVO/VGA DDC BUS"); - if (ddc == NULL) - return NULL; - - edid = drm_get_edid(connector, ddc); - intel_i2c_destroy(ddc); - - return edid; + return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); } enum drm_connector_status @@ -1449,28 +1447,26 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) enum drm_connector_status status; struct edid *edid; - edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); + edid = intel_sdvo_get_edid(connector, intel_sdvo->ddc_bus); if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { - u8 saved_ddc = intel_sdvo->ddc_bus, ddc; + u8 ddc; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { - intel_sdvo->ddc_bus = ddc; - edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); - if (edid) + edid = intel_sdvo_get_edid(connector, ddc); + if (edid) { + /* + * If we found the EDID on the other bus, + * assume that is the correct DDC bus. + */ + intel_sdvo->ddc_bus = ddc; break; + } } - - /* - * If we found the EDID on the other bus, maybe that is the - * correct DDC bus. - */ - if (edid == NULL) - intel_sdvo->ddc_bus = saved_ddc; } /* @@ -1546,12 +1542,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; - int num_modes; /* set the bus switch and get the modes */ - num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); - if (num_modes) - return; + edid = intel_sdvo_get_edid(connector, intel_sdvo->ddc_bus); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC @@ -1559,7 +1552,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ - edid = intel_sdvo_get_analog_edid(connector); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + if (edid != NULL) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); @@ -1678,7 +1673,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. 
*/ - intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); + intel_ddc_get_modes(connector, intel_sdvo->i2c); if (list_empty(&connector->probed_modes) == false) goto end; @@ -2004,30 +1999,6 @@ intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) &intel_sdvo->is_hdmi, 1); } -static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, - struct i2c_msg msgs[], int num) -{ - struct intel_sdvo *intel_sdvo; - const struct i2c_algorithm *algo; - int ret; - - intel_sdvo = container_of(i2c_adap->algo_data, - struct intel_sdvo, - base); - algo = intel_sdvo->base.i2c_bus->algo; - - ret = intel_sdvo_set_control_bus_switch(intel_sdvo, - intel_sdvo->ddc_bus); - if (ret) - return ret; - - return algo->master_xfer(i2c_adap, msgs, num); -} - -static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { - .master_xfer = intel_sdvo_master_xfer, -}; - static u8 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) { @@ -2540,9 +2511,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; - u8 ch[0x40]; int i; - u32 i2c_reg, ddc_reg; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) @@ -2555,82 +2524,49 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* encoder type will be decided later */ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); - if (HAS_PCH_SPLIT(dev)) { - i2c_reg = PCH_GPIOE; - ddc_reg = PCH_GPIOE; - } else { - i2c_reg = GPIOE; - ddc_reg = GPIOE; - } - - /* setup the DDC bus. */ - if (IS_SDVOB(sdvo_reg)) - intel_encoder->i2c_bus = - intel_i2c_create(intel_encoder, - i2c_reg, "SDVOCTRL_E for SDVOB"); - else - intel_encoder->i2c_bus = - intel_i2c_create(intel_encoder, - i2c_reg, "SDVOCTRL_E for SDVOC"); - - if (!intel_encoder->i2c_bus) - goto err_inteloutput; + intel_sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); - /* Save the bit-banging i2c functionality for use by the DDC wrapper */ - intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; - /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) { + u8 byte; + + if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_i2c; + goto err; } } - /* setup the DDC bus. */ - if (IS_SDVOB(sdvo_reg)) { - intel_encoder->ddc_bus = - intel_i2c_create(intel_encoder, - ddc_reg, "SDVOB DDC BUS"); + if (IS_SDVOB(sdvo_reg)) dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - } else { - intel_encoder->ddc_bus = - intel_i2c_create(intel_encoder, - ddc_reg, "SDVOC DDC BUS"); + else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; - } - if (intel_encoder->ddc_bus == NULL) - goto err_i2c; - - /* Wrap with our custom algo which switches to DDC mode */ - intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) - goto err_i2c; + goto err; if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 
'B' : 'C'); - goto err_i2c; + goto err; } intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) - goto err_i2c; + goto err; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) - goto err_i2c; + goto err; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " @@ -2650,12 +2586,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); return true; -err_i2c: - if (intel_encoder->ddc_bus != NULL) - intel_i2c_destroy(intel_encoder->ddc_bus); - if (intel_encoder->i2c_bus != NULL) - intel_i2c_destroy(intel_encoder->i2c_bus); -err_inteloutput: +err: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_sdvo); From 219adae138513bae20b256f1946b9cb3b75ca05c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Sep 2010 23:05:10 +0100 Subject: [PATCH 163/476] drm/i915: Cache LVDS EDID We assume that the panel is permenantly connected and that the EDID data is consistent from boot, so simply cache the whole EDID for the panel. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f6a72cbb152d..5666e89288d3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -44,7 +44,7 @@ struct intel_lvds { struct intel_encoder base; - bool edid_good; + struct edid *edid; int fitting_mode; u32 pfit_control; @@ -475,14 +475,12 @@ static int intel_lvds_get_modes(struct drm_connector *connector) { struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *mode; - if (intel_lvds->edid_good) { - int ret = intel_ddc_get_modes(connector, - &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter); - if (ret) - return ret; + if (intel_lvds->edid) { + drm_mode_connector_update_edid_property(connector, + intel_lvds->edid); + return drm_add_edid_modes(connector, intel_lvds->edid); } mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); @@ -906,11 +904,10 @@ void intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - intel_lvds->edid_good = true; - if (!intel_ddc_get_modes(connector, &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter)) - intel_lvds->edid_good = false; + intel_lvds->edid = drm_get_edid(connector, + &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter); - if (!intel_lvds->edid_good) { + if (!intel_lvds->edid) { /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking From a6c45cf013a57e32ddae43dd4ac911eb4a3919fd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Sep 2010 00:32:17 +0100 Subject: [PATCH 164/476] drm/i915: INTEL_INFO->gen supercedes i8xx, i9xx, i965g Avoid confusion between i965g meaning broadwater and the gen4+ chipset families. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 11 ++-- drivers/gpu/drm/i915/i915_dma.c | 28 ++++----- drivers/gpu/drm/i915/i915_drv.c | 82 ++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 18 ++---- drivers/gpu/drm/i915/i915_gem.c | 29 ++++++--- drivers/gpu/drm/i915/i915_gem_tiling.c | 30 ++++----- drivers/gpu/drm/i915/i915_irq.c | 29 +++++---- drivers/gpu/drm/i915/i915_suspend.c | 24 ++++---- drivers/gpu/drm/i915/intel_crt.c | 8 +-- drivers/gpu/drm/i915/intel_display.c | 74 +++++++++++----------- drivers/gpu/drm/i915/intel_fb.c | 4 +- drivers/gpu/drm/i915/intel_lvds.c | 10 +-- drivers/gpu/drm/i915/intel_overlay.c | 29 ++++----- drivers/gpu/drm/i915/intel_panel.c | 4 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 8 +-- drivers/gpu/drm/i915/intel_sdvo.c | 4 +- drivers/gpu/drm/i915/intel_tv.c | 4 +- 17 files changed, 196 insertions(+), 200 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fb5c2a621907..361a825c2363 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -58,13 +58,9 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "gen: %d\n", info->gen); #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) B(is_mobile); - B(is_i8xx); B(is_i85x); B(is_i915g); - B(is_i9xx); B(is_i945gm); - B(is_i965g); - B(is_i965gm); B(is_g33); B(need_gfx_hws); B(is_g4x); @@ -79,6 +75,7 @@ static int i915_capabilities(struct seq_file *m, void *data) B(cursor_needs_physical); B(has_overlay); B(overlay_needs_physical); + B(supports_tv); #undef B return 0; @@ -473,7 +470,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, "RingHead : %08x\n", head); seq_printf(m, "RingTail : %08x\n", tail); seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); - seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); + seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? 
ACTHD_I965 : ACTHD)); return 0; } @@ -535,7 +532,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); } @@ -757,7 +754,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (IS_IRONLAKE(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) + else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 39aaffe79583..9977a0a5308a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -63,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev) memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 0xf0; @@ -376,7 +376,7 @@ i915_emit_box(struct drm_device *dev, return -EINVAL; } - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { BEGIN_LP_RING(4); OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); @@ -480,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, if (!IS_I830(dev) && !IS_845G(dev)) { BEGIN_LP_RING(2); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { @@ -887,12 +887,12 @@ static int intel_alloc_mchbar_resource(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; int ret; - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; @@ -919,7 +919,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) return ret; } - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pci_write_config_dword(dev_priv->bridge_dev, reg + 4, upper_32_bits(dev_priv->mch_res.start)); @@ -933,7 +933,7 @@ static void intel_setup_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; @@ -970,7 +970,7 @@ static void intel_teardown_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; if (dev_priv->mchbar_need_disable) { @@ -1012,11 +1012,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, { unsigned long *gtt; unsigned long entry, phys; - int gtt_bar = IS_I9XX(dev) ? 0 : 1; + int gtt_bar = IS_GEN2(dev) ? 
1 : 0; int gtt_offset, gtt_size; - if (IS_I965G(dev)) { - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { + if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { gtt_offset = 2*1024*1024; gtt_size = 2*1024*1024; } else { @@ -1041,10 +1041,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); /* Mask out these reserved bits on this hardware. */ - if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || - IS_I945G(dev) || IS_I945GM(dev)) { + if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev)) entry &= ~PTE_ADDRESS_MASK_HIGH; - } /* If it's not a mapping type we know, then bail. */ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && @@ -1899,7 +1897,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->info = (struct intel_device_info *) flags; /* Add register map (needed for suspend/resume) */ - mmio_bar = IS_I9XX(dev) ? 0 : 1; + mmio_bar = IS_GEN2(dev) ? 1 : 0; base = pci_resource_start(dev->pdev, mmio_bar); size = pci_resource_len(dev->pdev, mmio_bar); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 13dca9da6507..87c6b5f81fea 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,97 +61,101 @@ extern int intel_agp_enabled; .driver_data = (unsigned long) info } static const struct intel_device_info intel_i830_info = { - .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { - .gen = 2, .is_i8xx = 1, + .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { - .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { - .gen = 2, .is_i8xx = 1, + .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { - .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { - .gen = 3, .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { - .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { - .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { - .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 4, .is_broadwater = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { - .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, + .gen = 4, .is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, 
.has_hotplug = 1, .has_overlay = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { - .gen = 3, .is_g33 = 1, .is_i9xx = 1, + .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { - .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_gm45_info = { - .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, + .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_pineview_info = { - .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 5, .is_ironlake = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 5, .is_ironlake = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { - .gen = 6, .is_i965g = 1, .is_i9xx = 1, + .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { - .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, + .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, }; @@ -359,33 +363,27 @@ int i965_reset(struct drm_device *dev, u8 flags) if (need_display) i915_save_display(dev); - if (IS_I965G(dev) || IS_G4X(dev)) { - /* - * Set the domains we want to reset, then the reset bit (bit 0). - * Clear the reset bit after a while and wait for hardware status - * bit (bit 1) to be set - */ + /* + * Set the domains we want to reset, then the reset bit (bit 0). + * Clear the reset bit after a while and wait for hardware status + * bit (bit 1) to be set + */ + pci_read_config_byte(dev->pdev, GDRST, &gdrst); + pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); + udelay(50); + pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); + + /* ...we don't want to loop forever though, 500ms should be plenty */ + timeout = jiffies + msecs_to_jiffies(500); + do { + udelay(100); pci_read_config_byte(dev->pdev, GDRST, &gdrst); - pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); - udelay(50); - pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); + } while ((gdrst & 0x1) && time_after(timeout, jiffies)); - /* ...we don't want to loop forever though, 500ms should be plenty */ - timeout = jiffies + msecs_to_jiffies(500); - do { - udelay(100); - pci_read_config_byte(dev->pdev, GDRST, &gdrst); - } while ((gdrst & 0x1) && time_after(timeout, jiffies)); - - if (gdrst & 0x1) { - WARN(true, "i915: Failed to reset chip\n"); - mutex_unlock(&dev->struct_mutex); - return -EIO; - } - } else { - DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); + if (gdrst & 0x1) { + WARN(true, "i915: Failed to reset chip\n"); mutex_unlock(&dev->struct_mutex); - return -ENODEV; + return -EIO; } /* Ok, now get things going again... 
*/ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cf08128798a7..4b6aeb5e66b9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -196,13 +196,9 @@ struct drm_i915_display_funcs { struct intel_device_info { u8 gen; u8 is_mobile : 1; - u8 is_i8xx : 1; u8 is_i85x : 1; u8 is_i915g : 1; - u8 is_i9xx : 1; u8 is_i945gm : 1; - u8 is_i965g : 1; - u8 is_i965gm : 1; u8 is_g33 : 1; u8 need_gfx_hws : 1; u8 is_g4x : 1; @@ -217,6 +213,7 @@ struct intel_device_info { u8 cursor_needs_physical : 1; u8 has_overlay : 1; u8 overlay_needs_physical : 1; + u8 supports_tv : 1; }; enum no_fbc_reason { @@ -1220,8 +1217,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) -#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) -#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) @@ -1233,7 +1228,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) -#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) @@ -1251,20 +1245,18 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ -#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ +#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev)) +#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) -#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ - !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ - !IS_GEN6(dev)) +#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) +#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0355cd28b270..71a2723545b9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1346,14 +1346,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj) * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. 
*/ - if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) + if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - if (IS_I9XX(dev)) + if (INTEL_INFO(dev)->gen == 3) start = 1024*1024; else start = 512*1024; @@ -1660,7 +1660,7 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) uint32_t flush_domains = 0; /* The sampler always gets flushed on i965 (sigh) */ - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) flush_domains |= I915_GEM_DOMAIN_SAMPLER; ring->flush(dev, ring, @@ -2443,7 +2443,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * therefore we must wait for any outstanding access to complete * before clearing the fence. */ - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { int ret; ret = i915_gem_object_flush_gpu_write_domain(obj, true); @@ -3893,7 +3893,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; exec2_list[i].alignment = exec_list[i].alignment; exec2_list[i].offset = exec_list[i].offset; - if (!IS_I965G(dev)) + if (INTEL_INFO(dev)->gen < 4) exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; else exec2_list[i].flags = 0; @@ -4614,21 +4614,30 @@ i915_gem_load(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; - if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dev_priv->num_fence_regs = 16; else dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - if (IS_I965G(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0); + break; + case 5: + case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); - } else { - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + break; + case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); + case 2: + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + break; } i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index caef7ff2aa39..b09b157f6ada 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -98,7 +98,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if (!IS_I9XX(dev)) { + } else if (IS_GEN2(dev)) { /* As far as we know, the 865 doesn't have these bit 6 * swizzling issues. 
*/ @@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; - if (!IS_I9XX(dev) || + if (IS_GEN2(dev) || (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_width = 128; else tile_width = 512; /* check maximum stride & object size */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { /* i965 stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; - } else if (IS_GEN3(dev) || IS_GEN2(dev)) { + } else { if (stride > 8192) return false; @@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) } /* 965+ just needs multiples of tile width */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { if (stride & (tile_width - 1)) return false; return true; @@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; - if (!IS_I965G(dev)) { - if (obj_priv->gtt_offset & (obj->size - 1)) + if (INTEL_INFO(dev)->gen >= 4) + return true; + + if (obj_priv->gtt_offset & (obj->size - 1)) + return false; + + if (IS_GEN3(dev)) { + if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + return false; + } else { + if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) return false; - if (IS_I9XX(dev)) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) - return false; - } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) - return false; - } } return true; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e64b8eaa0b9d..2b5e54c2900f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev) else { i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } @@ -397,15 +397,18 @@ static void i915_error_work_func(struct work_struct *work) kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); if (atomic_read(&dev_priv->mm.wedged)) { - if (IS_I965G(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 4: DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); if (!i965_reset(dev, GDRST_RENDER)) { atomic_set(&dev_priv->mm.wedged, 0); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } - } else { + break; + default: DRM_DEBUG_DRIVER("reboot required\n"); + break; } } } @@ -501,7 +504,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring) if (IS_I830(dev) || IS_845G(dev)) cmd = MI_BATCH_BUFFER; - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) cmd = (MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); else @@ -580,7 +583,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->pipeastat = I915_READ(PIPEASTAT); error->pipebstat = I915_READ(PIPEBSTAT); error->instpm = I915_READ(INSTPM); - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { error->ipeir = I915_READ(IPEIR); error->ipehr = I915_READ(IPEHR); error->instdone = I915_READ(INSTDONE); @@ -778,7 +781,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); printk(KERN_ERR "page table error\n"); @@ -804,7 
+807,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR "instruction error\n"); printk(KERN_ERR " INSTPM: 0x%08x\n", I915_READ(INSTPM)); - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); printk(KERN_ERR " IPEIR: 0x%08x\n", @@ -905,7 +908,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ obj_priv = to_intel_bo(work->pending_flip_obj); - if(IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; } else { @@ -944,7 +947,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) iir = I915_READ(IIR); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; else vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; @@ -1209,7 +1212,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -1322,11 +1325,7 @@ void i915_hangcheck_elapsed(unsigned long data) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd, instdone, instdone1; - /* No reset support on this chip yet. */ - if (IS_GEN6(dev)) - return; - - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { acthd = I915_READ(ACTHD); instdone = I915_READ(INSTDONE); instdone1 = 0; diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 967dcde312b2..989c19d2d959 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPA1 = I915_READ(FPA1); dev_priv->saveDPLL_A = I915_READ(DPLL_A); } - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); @@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPASURF = I915_READ(DSPASURF); dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); } @@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPB1 = I915_READ(FPB1); dev_priv->saveDPLL_B = I915_READ(DPLL_B); } - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); @@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); - if (IS_I965GM(dev) || IS_GM45(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); } @@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 
POSTING_READ(dpll_a_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(DPLL_A_MD); } @@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); } @@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(DPLL_B_MD); } @@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } @@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); dev_priv->saveCURBPOS = I915_READ(CURBPOS); dev_priv->saveCURBBASE = I915_READ(CURBBASE); - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); /* CRT state */ @@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev) dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) dev_priv->saveLVDS = I915_READ(LVDS); @@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); /* CRT state */ @@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 6d3385511663..8e484c9ac1f5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, if (mode->clock < 25000) return MODE_CLOCK_LOW; - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) max_clock = 350000; else max_clock = 400000; @@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); @@ -325,7 +325,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct 
intel_encoder *intel_encoder /* Set the border color to purple. */ I915_WRITE(bclrpat_reg, 0x500050); - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { uint32_t pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); POSTING_READ(pipeconf_reg); @@ -411,7 +411,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) int dpms_mode; enum drm_connector_status status; - if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { + if (I915_HAS_HOTPLUG(dev)) { if (intel_crt_detect_hotplug(connector)) return connector_status_connected; else diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1b5d878be975..c3f0400963de 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -708,16 +708,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) limit = intel_ironlake_limit(crtc); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); - } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_i9xx_lvds; - else - limit = &intel_limits_i9xx_sdvo; } else if (IS_PINEVIEW(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; + } else if (!IS_GEN2(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits_i9xx_lvds; + else + limit = &intel_limits_i9xx_sdvo; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; @@ -1429,7 +1429,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) alignment = 4 * 1024; else alignment = 64 * 1024; @@ -1524,7 +1524,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, DRM_ERROR("Unknown color depth\n"); return -EINVAL; } - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { if (obj_priv->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else @@ -1543,7 +1543,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitch); I915_WRITE(DSPSTRIDE(plane), fb->pitch); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPSURF(plane), Start); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPADDR(plane), Offset); @@ -2388,7 +2388,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_flush_display_plane(dev, plane); /* Wait for vblank for the disable to take effect */ - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) intel_wait_for_vblank_off(dev, pipe); } @@ -3181,11 +3181,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, DRM_DEBUG_KMS("self-refresh watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); - if (IS_I965GM(dev)) + if (IS_CRESTLINE(dev)) I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { /* Turn off self refresh if both pipes are enabled */ - if (IS_I965GM(dev)) + if (IS_CRESTLINE(dev)) I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); } @@ -3215,9 +3215,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, int sr_clock, sr_entries = 0; /* Create copies of the base settings for each pipe */ - if (IS_I965GM(dev) || IS_I945GM(dev)) + if (IS_CRESTLINE(dev) || IS_I945GM(dev)) planea_params = planeb_params = i945_wm_info; - else if 
(IS_I9XX(dev)) + else if (!IS_GEN2(dev)) planea_params = planeb_params = i915_wm_info; else planea_params = planeb_params = i855_wm_info; @@ -3576,7 +3576,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", refclk / 1000); - } else if (IS_I9XX(dev)) { + } else if (!IS_GEN2(dev)) { refclk = 96000; if (HAS_PCH_SPLIT(dev)) refclk = 120000; /* 120Mhz refclk */ @@ -3775,7 +3775,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (is_lvds) dpll |= DPLLB_MODE_LVDS; else @@ -3818,7 +3818,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); } else { if (is_lvds) { @@ -3859,7 +3859,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dspcntr |= DISPPLANE_SEL_PIPE_B; } - if (pipe == 0 && !IS_I965G(dev)) { + if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { /* Enable pixel doubling when the dot clock is > 90% of the (display) * core speed. * @@ -3947,7 +3947,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * panels behave in the two modes. */ /* set the dithering flag on non-PCH LVDS as needed */ - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { if (dev_priv->lvds_dither) temp |= LVDS_ENABLE_DITHER; else @@ -3991,7 +3991,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, POSTING_READ(dpll_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { temp = 0; if (is_sdvo) { temp = intel_mode_get_pixel_multiplier(adjusted_mode); @@ -4334,7 +4334,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->phys_obj->handle->busaddr; } - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) I915_WRITE(CURSIZE, (height << 12) | width); finish: @@ -4569,7 +4569,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (IS_PINEVIEW(dev)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); @@ -5768,20 +5768,20 @@ void intel_init_clock_gating(struct drm_device *dev) if (IS_GM45(dev)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, dspclk_gate); - } else if (IS_I965GM(dev)) { + } else if (IS_CRESTLINE(dev)) { I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); I915_WRITE(DSPCLK_GATE_D, 0); I915_WRITE(RAMCLK_GATE_D, 0); I915_WRITE16(DEUC, 0); - } else if (IS_I965G(dev)) { + } else if (IS_BROADWATER(dev)) { I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | I965_RCPB_CLOCK_GATE_DISABLE | I965_ISC_CLOCK_GATE_DISABLE | I965_FBC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); - } else if (IS_I9XX(dev)) { + } else if (IS_GEN3(dev)) { u32 dstate = I915_READ(D_STATE); dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | @@ -5863,7 +5863,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; - } else if (IS_I965GM(dev)) { + } else if (IS_CRESTLINE(dev)) { 
dev_priv->display.fbc_enabled = i8xx_fbc_enabled; dev_priv->display.enable_fbc = i8xx_enable_fbc; dev_priv->display.disable_fbc = i8xx_disable_fbc; @@ -5923,9 +5923,9 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_wm = pineview_update_wm; } else if (IS_G4X(dev)) dev_priv->display.update_wm = g4x_update_wm; - else if (IS_I965G(dev)) + else if (IS_GEN4(dev)) dev_priv->display.update_wm = i965_update_wm; - else if (IS_I9XX(dev)) { + else if (IS_GEN3(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; } else if (IS_I85X(dev)) { @@ -6039,24 +6039,24 @@ void intel_modeset_init(struct drm_device *dev) intel_init_display(dev); - if (IS_I965G(dev)) { - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } else if (IS_I9XX(dev)) { + if (IS_GEN2(dev)) { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } else if (IS_GEN3(dev)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } /* set memory base */ - if (IS_I9XX(dev)) - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); - else + if (IS_GEN2(dev)) dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); + else + dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); - if (IS_MOBILE(dev) || IS_I9XX(dev)) + if (IS_MOBILE(dev) || !IS_GEN2(dev)) dev_priv->num_pipe = 2; else dev_priv->num_pipe = 1; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 8a23bf772c95..7af4accafb7f 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -68,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_gem_object *fbo = NULL; struct drm_i915_gem_object *obj_priv; struct device *device = &dev->pdev->dev; - int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; + int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) @@ -129,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, goto out_unpin; } info->apertures->ranges[0].base = dev->mode_config.fb_base; - if (IS_I9XX(dev)) + if (!IS_GEN2(dev)) info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); else info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 5666e89288d3..02c5aed36c87 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -198,7 +198,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; /* Should never happen!! 
*/ - if (!IS_I965G(dev) && intel_crtc->pipe == 0) { + if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { DRM_ERROR("Can't support LVDS on pipe A\n"); return false; } @@ -227,7 +227,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } /* Make sure pre-965s set dither correctly */ - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; } @@ -238,7 +238,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, goto out; /* 965+ wants fuzzy fitting */ - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); @@ -264,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; @@ -323,7 +323,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * Fortunately this is all done for us in hw. */ pfit_control |= PFIT_ENABLE; - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pfit_control |= PFIT_SCALING_AUTO; else pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c4699c916698..375316a8420e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -552,15 +552,15 @@ static int uv_vsubsampling(u32 format) static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_I9XX(dev)) { - mask = 0x3f; - shift = 6; - } else { + if (IS_GEN2(dev)) { mask = 0x1f; shift = 5; + } else { + mask = 0x3f; + shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (IS_I9XX(dev)) + if (!IS_GEN2(dev)) ret <<= 1; ret -=1; return ret << 2; @@ -768,7 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; } regs->OCONFIG = OCONF_CC_OUT_8BIT; - if (IS_I965GM(overlay->dev)) + if (IS_GEN4(overlay->dev)) regs->OCONFIG |= OCONF_CSC_MODE_BT709; regs->OCONFIG |= overlay->crtc->pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; @@ -880,7 +880,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, return -EINVAL; /* can't use the overlay with double wide pipe */ - if (!IS_I965G(overlay->dev) && + if (INTEL_INFO(overlay->dev)->gen < 4 && (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) return -EINVAL; @@ -897,14 +897,15 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay) /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { + /* on i965 use the PGM reg to read out the autoscaler values */ + ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; + } else { if (pfit_control & VERT_AUTO_SCALE) ratio = I915_READ(PFIT_AUTO_RATIOS); else ratio = I915_READ(PFIT_PGM_RATIOS); ratio >>= PFIT_VERT_SCALE_SHIFT; - } else { /* on i965 use the PGM reg to read out the autoscaler values */ - ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } overlay->pfit_vscale_ratio = ratio; @@ -1007,7 +1008,7 @@ static int check_overlay_src(struct drm_device *dev, if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_I965G(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? @@ -1068,7 +1069,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) return -1; /* 965 can place panel fitter on either pipe */ - if (IS_I965G(dev)) + if (IS_GEN4(dev)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ @@ -1302,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1334,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) goto out_unlock; if (overlay->active) { diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 30abe7afc942..92ff8f385278 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -116,7 +116,7 @@ static int is_backlight_combination_mode(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; if (IS_GEN2(dev)) @@ -138,7 +138,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) max >>= 17; } else { max >>= 16; - if (!IS_I965G(dev)) + if (INTEL_INFO(dev)->gen < 4) max &= ~1; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 11bcfc871a0d..670f94af6b07 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -97,7 +97,7 @@ render_ring_flush(struct drm_device *dev, if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) cmd &= ~MI_NO_WRITE_FLUSH; - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { /* * On the 965, the sampler cache always gets flushed * and this bit is reserved. 
@@ -138,7 +138,7 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; + u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD; return I915_READ(acthd_reg); } @@ -224,7 +224,7 @@ static int init_render_ring(struct drm_device *dev, int ret = init_ring_common(dev, ring); int mode; - if (IS_I9XX(dev) && !IS_GEN3(dev)) { + if (INTEL_INFO(dev)->gen > 3) { mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; @@ -528,7 +528,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_emit(dev, ring, 0); } else { intel_ring_begin(dev, ring, 4); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index ba058e600ce7..7cd2d9592d65 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1162,7 +1162,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, return; /* Set the SDVO control regs. */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { sdvox = SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; @@ -1185,7 +1185,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (intel_sdvo->is_hdmi) sdvox |= SDVO_AUDIO_ENABLE; - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index e03783fbbf95..49ab11c667bb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1139,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, color_conversion->av); } - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) I915_WRITE(TV_CLR_KNOBS, 0x00404000); else I915_WRITE(TV_CLR_KNOBS, 0x00606000); @@ -1165,7 +1165,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); /* Wait for vblank for the disable to take effect */ - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) intel_wait_for_vblank(dev, intel_crtc->pipe); I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); From f49f0586191fe16140410db0a46d43bdc690d6af Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Sat, 11 Sep 2010 01:19:14 -0700 Subject: [PATCH 165/476] drm/i915: Actually set the reset bit in i965_reset. Previously, it was only being set if passed GDRST_FULL - but the only caller passed GDRST_RENDER. So the hardware never actually reset. The comments also did not match the code. Instead, just set the reset bit regardless of what flags were passed. The GPU now resets correctly on my GM45.
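In code terms (condensed from the hunk below, not additional new code), the GDRST write changes from

    pci_write_config_byte(dev->pdev, GDRST,
                          gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));

to

    /* always assert the reset bit (bit 0) alongside the requested domains */
    pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | 0x1);

so a render-only reset request now actually triggers the reset. The open-coded 500 ms polling loop is replaced by a wait_for(i965_reset_complete(dev), 500) check, where i965_reset_complete() reads GDRST back over PCI config space.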
Signed-off-by: Kenneth Graunke Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 87c6b5f81fea..7209997f18fe 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -32,6 +32,7 @@ #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_drv.h" #include #include "drm_crtc_helper.h" @@ -326,6 +327,13 @@ int i915_resume(struct drm_device *dev) return i915_drm_thaw(dev); } +static int i965_reset_complete(struct drm_device *dev) +{ + u8 gdrst; + pci_read_config_byte(dev->pdev, GDRST, &gdrst); + return gdrst & 0x1; +} + /** * i965_reset - reset chip after a hang * @dev: drm device to reset @@ -345,7 +353,6 @@ int i915_resume(struct drm_device *dev) int i965_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; - unsigned long timeout; u8 gdrst; /* * We really should only reset the display subsystem if we actually @@ -364,23 +371,15 @@ int i965_reset(struct drm_device *dev, u8 flags) i915_save_display(dev); /* - * Set the domains we want to reset, then the reset bit (bit 0). - * Clear the reset bit after a while and wait for hardware status - * bit (bit 1) to be set + * Set the domains we want to reset (GRDOM/bits 2 and 3) as + * well as the reset bit (GR/bit 0). Setting the GR bit + * triggers the reset; when done, the hardware will clear it. */ pci_read_config_byte(dev->pdev, GDRST, &gdrst); - pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); - udelay(50); - pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); + pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | 0x1); - /* ...we don't want to loop forever though, 500ms should be plenty */ - timeout = jiffies + msecs_to_jiffies(500); - do { - udelay(100); - pci_read_config_byte(dev->pdev, GDRST, &gdrst); - } while ((gdrst & 0x1) && time_after(timeout, jiffies)); - - if (gdrst & 0x1) { + /* Wait for the hardware to reset (but no more than 500 ms) */ + if (wait_for(i965_reset_complete(dev), 500)) { WARN(true, "i915: Failed to reset chip\n"); mutex_unlock(&dev->struct_mutex); return -EIO; From eeccdcac07c1e21d25e7d3cf70030059a3017f0c Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Sat, 11 Sep 2010 01:24:50 -0700 Subject: [PATCH 166/476] drm/i915: Rename graphics reset registers. The graphics domains are listed as GRDOM in the documentation, and the GDRST PCI config register (0xc0) is only valid on I965 and GM45. Newer chips (like Sandy Bridge) have a different GDRST. Signed-off-by: Kenneth Graunke Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 6 +++--- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 10 ++++++---- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7209997f18fe..45027d5ad1e5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -330,7 +330,7 @@ int i915_resume(struct drm_device *dev) static int i965_reset_complete(struct drm_device *dev) { u8 gdrst; - pci_read_config_byte(dev->pdev, GDRST, &gdrst); + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); return gdrst & 0x1; } @@ -375,8 +375,8 @@ int i965_reset(struct drm_device *dev, u8 flags) * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. 
*/ - pci_read_config_byte(dev->pdev, GDRST, &gdrst); - pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | 0x1); + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); /* Wait for the hardware to reset (but no more than 500 ms) */ if (wait_for(i965_reset_complete(dev), 500)) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2b5e54c2900f..b1dc943a02cc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -401,7 +401,7 @@ static void i915_error_work_func(struct work_struct *work) case 4: DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); - if (!i965_reset(dev, GDRST_RENDER)) { + if (!i965_reset(dev, GRDOM_RENDER)) { atomic_set(&dev_priv->mm.wedged, 0); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 18e3749fbd11..565a7a3ccd4e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -108,10 +108,12 @@ #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 -#define GDRST 0xc0 -#define GDRST_FULL (0<<2) -#define GDRST_RENDER (1<<2) -#define GDRST_MEDIA (3<<2) + +/* Graphics reset regs */ +#define I965_GDRST 0xc0 +#define GRDOM_FULL (0<<2) +#define GRDOM_RENDER (1<<2) +#define GRDOM_MEDIA (3<<2) /* VGA stuff */ From 0573ed4a947d7a563db197511611d8a9039feb41 Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Sat, 11 Sep 2010 03:17:19 -0700 Subject: [PATCH 167/476] drm/i915: Add support for GPU soft reset on Ironlake. Ironlake's graphics reset register has to be accessed via the MCHBAR, rather than via PCI config space, which requires some refactoring. Signed-off-by: Kenneth Graunke Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 30 ++++++++++++++++++++++++------ drivers/gpu/drm/i915/i915_irq.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 3 ++- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 45027d5ad1e5..e88aabdfd1d9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -334,6 +334,24 @@ static int i965_reset_complete(struct drm_device *dev) return gdrst & 0x1; } +static int i965_do_reset(struct drm_device *dev, u8 flags) +{ + u8 gdrst; + + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); + + return wait_for(i965_reset_complete(dev), 500); +} + +static int ironlake_do_reset(struct drm_device *dev, u8 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); + return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); +} + /** * i965_reset - reset chip after a hang * @dev: drm device to reset @@ -353,12 +371,12 @@ static int i965_reset_complete(struct drm_device *dev) int i965_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; - u8 gdrst; /* * We really should only reset the display subsystem if we actually * need to */ bool need_display = true; + int ret; mutex_lock(&dev->struct_mutex); @@ -375,11 +393,11 @@ int i965_reset(struct drm_device *dev, u8 flags) * well as the reset bit (GR/bit 0). 
Setting the GR bit * triggers the reset; when done, the hardware will clear it. */ - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); - - /* Wait for the hardware to reset (but no more than 500 ms) */ - if (wait_for(i965_reset_complete(dev), 500)) { + if (IS_IRONLAKE(dev)) + ret = ironlake_do_reset(dev, flags); + else + ret = i965_do_reset(dev, flags); + if (ret) { WARN(true, "i915: Failed to reset chip\n"); mutex_unlock(&dev->struct_mutex); return -EIO; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b1dc943a02cc..a5197e13d942 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -398,6 +398,7 @@ static void i915_error_work_func(struct work_struct *work) if (atomic_read(&dev_priv->mm.wedged)) { switch (INTEL_INFO(dev)->gen) { + case 5: case 4: DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 565a7a3ccd4e..b46e580421e1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -110,7 +110,8 @@ #define LBB 0xf4 /* Graphics reset regs */ -#define I965_GDRST 0xc0 +#define I965_GDRST 0xc0 /* PCI config register */ +#define ILK_GDSR 0x2ca4 /* MCHBAR offset */ #define GRDOM_FULL (0<<2) #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) From 9fd981413e005827e7363a37fd0b61f9d0928034 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 18 Sep 2010 08:08:06 +0100 Subject: [PATCH 168/476] drm/i915: After a reset perform a forced modeset On more recent chipsets, restoring the display is not as simple as writing a few registers, so force a full modeset of the current configuration in order to retrain the display link. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e88aabdfd1d9..e58e91736f81 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -385,9 +385,6 @@ int i965_reset(struct drm_device *dev, u8 flags) */ i915_gem_retire_requests(dev); - if (need_display) - i915_save_display(dev); - /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). Setting the GR bit @@ -428,13 +425,19 @@ int i965_reset(struct drm_device *dev, u8 flags) mutex_lock(&dev->struct_mutex); } - /* - * Display needs restore too... - */ - if (need_display) - i915_restore_display(dev); - mutex_unlock(&dev->struct_mutex); + + /* + * Perform a full modeset as on later generations, e.g. Ironlake, we may + * need to retrain the display link and cannot just restore the register + * values. 
+ */ + if (need_display) { + mutex_lock(&dev->mode_config.mutex); + drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); + } + return 0; } From 82690bba375586ab93d74265710c2fd5788c8178 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 18 Sep 2010 01:37:30 +0100 Subject: [PATCH 169/476] drm/i915/debug: Dump BSD ring buffers to debugfs Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 361a825c2363..ac48115429ed 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,9 +40,10 @@ #if defined(CONFIG_DEBUG_FS) -#define ACTIVE_LIST 1 -#define FLUSHING_LIST 2 -#define INACTIVE_LIST 3 +#define RENDER_LIST 1 +#define BSD_LIST 2 +#define FLUSHING_LIST 3 +#define INACTIVE_LIST 4 static const char *yesno(int v) { @@ -137,10 +138,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return ret; switch (list) { - case ACTIVE_LIST: - seq_printf(m, "Active:\n"); + case RENDER_LIST: + seq_printf(m, "Render:\n"); head = &dev_priv->render_ring.active_list; break; + case BSD_LIST: + seq_printf(m, "BSD:\n"); + head = &dev_priv->bsd_ring.active_list; + break; case INACTIVE_LIST: seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; @@ -974,7 +979,8 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) static struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0, 0}, - {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, + {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST}, + {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, From b84d5f0c22914d37d709add54c66e741c404fa56 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 18 Sep 2010 01:38:04 +0100 Subject: [PATCH 170/476] drm/i915: Inline i915_gem_ring_retire_request() Change the semantics to retire any buffer older than the current seqno rather than repeatedly calling calling the function to retire the buffer at the head of the list matching the request seqno. Whilst this should have no semantic impact on the implementation, Daniel was wondering if there was a bug where we might miss a retirement and so end up with a continually growing active list. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 93 ++++++++++++++------------------- 1 file changed, 39 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 71a2723545b9..1c02798bb7e4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1667,47 +1667,6 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) I915_GEM_DOMAIN_COMMAND, flush_domains); } -/** - * Moves buffers associated only with the given active seqno from the active - * to inactive list, potentially freeing them. 
- */ -static void -i915_gem_retire_request(struct drm_device *dev, - struct drm_i915_gem_request *request) -{ - trace_i915_gem_request_retire(dev, request->seqno); - - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate. - */ - while (!list_empty(&request->ring->active_list)) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - - obj_priv = list_first_entry(&request->ring->active_list, - struct drm_i915_gem_object, - list); - obj = &obj_priv->base; - - /* If the seqno being retired doesn't match the oldest in the - * list, then the oldest in the list must still be newer than - * this seqno. - */ - if (obj_priv->last_rendering_seqno != request->seqno) - return; - -#if WATCH_LRU - DRM_INFO("%s: retire %d moves to inactive list %p\n", - __func__, request->seqno, obj); -#endif - - if (obj->write_domain != 0) - i915_gem_object_move_to_flushing(obj); - else - i915_gem_object_move_to_inactive(obj); - } -} - /** * Returns true if seq1 is later than seq2. */ @@ -1733,36 +1692,62 @@ i915_gem_retire_requests_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + bool wedged; - if (!ring->status_page.page_addr - || list_empty(&ring->request_list)) + if (!ring->status_page.page_addr || + list_empty(&ring->request_list)) return; seqno = i915_get_gem_seqno(dev, ring); + wedged = atomic_read(&dev_priv->mm.wedged); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; - uint32_t retiring_seqno; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, list); - retiring_seqno = request->seqno; - if (i915_seqno_passed(seqno, retiring_seqno) || - atomic_read(&dev_priv->mm.wedged)) { - i915_gem_retire_request(dev, request); - - list_del(&request->list); - list_del(&request->client_list); - kfree(request); - } else + if (!wedged && !i915_seqno_passed(seqno, request->seqno)) break; + + trace_i915_gem_request_retire(dev, request->seqno); + + list_del(&request->list); + list_del(&request->client_list); + kfree(request); + } + + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate. + */ + while (!list_empty(&ring->active_list)) { + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + list); + + if (!wedged && + !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + break; + + obj = &obj_priv->base; + +#if WATCH_LRU + DRM_INFO("%s: retire %d moves to inactive list %p\n", + __func__, request->seqno, obj); +#endif + + if (obj->write_domain != 0) + i915_gem_object_move_to_flushing(obj); + else + i915_gem_object_move_to_inactive(obj); } if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(dev, ring); dev_priv->trace_irq_seqno = 0; } From 9220434a8768902cd9cf248709972678b74aa8c1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 18 Sep 2010 11:02:01 +0100 Subject: [PATCH 171/476] drm/i915: Only emit a flush request on the active ring. When flushing the GPU domains,we emit a flush on *both* rings, even though they share a unified cache. Only emit the flush on the currently active ring. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 5 +-- drivers/gpu/drm/i915/i915_gem.c | 58 ++++++++++++++++++------- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 +-- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++ 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4b6aeb5e66b9..ed09846fac7b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -623,6 +623,8 @@ typedef struct drm_i915_private { /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; + + uint32_t flush_rings; } mm; struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ @@ -1014,9 +1016,6 @@ int i915_do_wait_request(struct drm_device *dev, bool interruptible, struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -void i915_gem_process_flushing_list(struct drm_device *dev, - uint32_t flush_domains, - struct intel_ring_buffer *ring); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1c02798bb7e4..cf2765529cfe 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1567,7 +1567,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) i915_verify_inactive(dev, __FILE__, __LINE__); } -void +static void i915_gem_process_flushing_list(struct drm_device *dev, uint32_t flush_domains, struct intel_ring_buffer *ring) @@ -1879,24 +1879,37 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, return i915_do_wait_request(dev, seqno, 1, ring); } +static void +i915_gem_flush_ring(struct drm_device *dev, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + ring->flush(dev, ring, invalidate_domains, flush_domains); + i915_gem_process_flushing_list(dev, flush_domains, ring); +} + static void i915_gem_flush(struct drm_device *dev, uint32_t invalidate_domains, - uint32_t flush_domains) + uint32_t flush_domains, + uint32_t flush_rings) { drm_i915_private_t *dev_priv = dev->dev_private; if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); - dev_priv->render_ring.flush(dev, &dev_priv->render_ring, - invalidate_domains, - flush_domains); - - if (HAS_BSD(dev)) - dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, - invalidate_domains, - flush_domains); + if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { + if (flush_rings & RING_RENDER) + i915_gem_flush_ring(dev, + &dev_priv->render_ring, + invalidate_domains, flush_domains); + if (flush_rings & RING_BSD) + i915_gem_flush_ring(dev, + &dev_priv->bsd_ring, + invalidate_domains, flush_domains); + } } /** @@ -2022,7 +2035,9 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
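Concretely (condensed from the hunks below), i915_gem_flush() gains a flush_rings mask, which is accumulated in dev_priv->mm.flush_rings from each object's ring while the GPU domains are computed, and only the rings named in the mask are flushed:

    if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
            if (flush_rings & RING_RENDER)
                    i915_gem_flush_ring(dev, &dev_priv->render_ring,
                                        invalidate_domains, flush_domains);
            if (flush_rings & RING_BSD)
                    i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
                                        invalidate_domains, flush_domains);
    }

The RING_RENDER and RING_BSD ids come from a new enum intel_ring_id field in struct intel_ring_buffer, set in the render and bsd ring definitions in intel_ringbuffer.c.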
*/ - i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + i915_gem_flush_ring(dev, + &dev_priv->render_ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); ret = i915_wait_request(dev, i915_gem_next_request_seqno(dev, &dev_priv->render_ring), @@ -2031,6 +2046,10 @@ i915_gpu_idle(struct drm_device *dev) return ret; if (HAS_BSD(dev)) { + i915_gem_flush_ring(dev, + &dev_priv->bsd_ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = i915_wait_request(dev, i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring), &dev_priv->bsd_ring); @@ -2598,7 +2617,9 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; - i915_gem_flush(dev, 0, obj->write_domain); + i915_gem_flush_ring(dev, + to_intel_bo(obj)->ring, + 0, obj->write_domain); BUG_ON(obj->write_domain); trace_i915_gem_object_change_domain(obj, @@ -2908,6 +2929,7 @@ static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; @@ -2972,6 +2994,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; + if (obj_priv->ring) + dev_priv->mm.flush_rings |= obj_priv->ring->id; #if WATCH_BUF DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", __func__, @@ -3684,6 +3708,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ dev->invalidate_domains = 0; dev->flush_domains = 0; + dev_priv->mm.flush_rings = 0; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -3703,7 +3728,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, #endif i915_gem_flush(dev, dev->invalidate_domains, - dev->flush_domains); + dev->flush_domains, + dev_priv->mm.flush_rings); } if (dev_priv->render_ring.outstanding_lazy_request) { @@ -4170,8 +4196,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. 
*/ - if (obj->write_domain) { - i915_gem_flush(dev, 0, obj->write_domain); + if (obj->write_domain & I915_GEM_GPU_DOMAINS) { + i915_gem_flush_ring(dev, + obj_priv->ring, + 0, obj->write_domain); (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 670f94af6b07..45f66e289af1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -116,8 +116,6 @@ render_ring_flush(struct drm_device *dev, intel_ring_emit(dev, ring, MI_NOOP); intel_ring_advance(dev, ring); } - - i915_gem_process_flushing_list(dev, flush_domains, ring); } static unsigned int render_ring_get_head(struct drm_device *dev, @@ -386,8 +384,6 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_emit(dev, ring, MI_FLUSH); intel_ring_emit(dev, ring, MI_NOOP); intel_ring_advance(dev, ring); - - i915_gem_process_flushing_list(dev, flush_domains, ring); } static inline unsigned int bsd_ring_get_head(struct drm_device *dev, @@ -799,6 +795,7 @@ void intel_fill_struct(struct drm_device *dev, struct intel_ring_buffer render_ring = { .name = "render ring", + .id = RING_RENDER, .regs = { .ctl = PRB0_CTL, .head = PRB0_HEAD, @@ -836,6 +833,7 @@ struct intel_ring_buffer render_ring = { struct intel_ring_buffer bsd_ring = { .name = "bsd ring", + .id = RING_BSD, .regs = { .ctl = BSD_RING_CTL, .head = BSD_RING_HEAD, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index fa5d84f85c26..8dc0e62b7d2a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -10,6 +10,10 @@ struct intel_hw_status_page { struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { const char *name; + enum intel_ring_id { + RING_RENDER = 0x1, + RING_BSD = 0x2, + } id; struct ring_regs { u32 ctl; u32 head; From 9375e446e7f43be9a7c21e246cee35ea912532ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 12:21:28 +0100 Subject: [PATCH 172/476] drm/i915: Clear flushing lists on GPU reset Owain Ainsworth noticed that the reset code failed to clear the flushing list leaving the driver in an inconsistent state following a hung GPU. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++++++++++ 3 files changed, 23 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e58e91736f81..cb1ddc6af6a6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -385,6 +385,12 @@ int i965_reset(struct drm_device *dev, u8 flags) */ i915_gem_retire_requests(dev); + /* Remove anything from the flushing lists. The GPU cache is likely + * to be lost on reset along with the data, so simply move the + * lost bo to the inactive list. + */ + i915_gem_reset_flushing_list(dev); + /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). 
Setting the GR bit diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ed09846fac7b..50fcb91218e8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -997,6 +997,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); +void i915_gem_reset_flushing_list(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cf2765529cfe..4e978e4044a3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1683,6 +1683,22 @@ i915_get_gem_seqno(struct drm_device *dev, return ring->get_gem_seqno(dev, ring); } +void i915_gem_reset_flushing_list(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + while (!list_empty(&dev_priv->mm.flushing_list)) { + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + list); + + obj_priv->base.write_domain = 0; + i915_gem_object_move_to_inactive(&obj_priv->base); + } +} + /** * This function clears the request list as sequence numbers are passed. */ From 77f01230223a08792f5320ebba27af9cbb81b0cf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 12:31:36 +0100 Subject: [PATCH 173/476] drm/i915: Clear GPU read domains on reset Clear the GPU read domain for the inactive objects on a reset so that they are correctly invalidated on reuse. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 13 +++++++++++++ 3 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index cb1ddc6af6a6..38e889bfd99c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -391,6 +391,11 @@ int i965_reset(struct drm_device *dev, u8 flags) */ i915_gem_reset_flushing_list(dev); + /* Move everything out of the GPU domains to ensure we do any + * necessary invalidation upon reuse. + */ + i915_gem_reset_inactive_gpu_domains(dev); + /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). 
Setting the GR bit diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 50fcb91218e8..ae05008a5900 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -998,6 +998,7 @@ int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset_flushing_list(struct drm_device *dev); +void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4e978e4044a3..325f52bc1401 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1699,6 +1699,19 @@ void i915_gem_reset_flushing_list(struct drm_device *dev) } } +void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + list_for_each_entry(obj_priv, + &dev_priv->mm.inactive_list, + list) + { + obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + } +} + /** * This function clears the request list as sequence numbers are passed. */ From f803aa5532d14efc463abbeae10faa115c457a07 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 12:38:26 +0100 Subject: [PATCH 174/476] drm/i915: Clean up bo lists on all hung gpus Previously we only tidied up the active bo lists for chipsets were we would attempt to reset the GPU. However, this action is necessary for the system to continue and reclaim the dead bo for all chipsets. Pointed out, in passing, by Owain Ainsworth. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 15 ++++++++++----- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_irq.c | 18 +++++------------- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 38e889bfd99c..2ddac06d5967 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -368,7 +368,7 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags) * - re-init interrupt state * - re-init display */ -int i965_reset(struct drm_device *dev, u8 flags) +int i915_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; /* @@ -401,14 +401,19 @@ int i965_reset(struct drm_device *dev, u8 flags) * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. */ - if (IS_IRONLAKE(dev)) + ret = -ENODEV; + switch (INTEL_INFO(dev)->gen) { + case 5: ret = ironlake_do_reset(dev, flags); - else + break; + case 4: ret = i965_do_reset(dev, flags); + break; + } if (ret) { - WARN(true, "i915: Failed to reset chip\n"); + DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); - return -EIO; + return ret; } /* Ok, now get things going again... 
*/ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ae05008a5900..b57e049a4623 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -874,7 +874,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4); -extern int i965_reset(struct drm_device *dev, u8 flags); +extern int i915_reset(struct drm_device *dev, u8 flags); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a5197e13d942..b1e7655288d8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -397,19 +397,11 @@ static void i915_error_work_func(struct work_struct *work) kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); if (atomic_read(&dev_priv->mm.wedged)) { - switch (INTEL_INFO(dev)->gen) { - case 5: - case 4: - DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); - if (!i965_reset(dev, GRDOM_RENDER)) { - atomic_set(&dev_priv->mm.wedged, 0); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); - } - break; - default: - DRM_DEBUG_DRIVER("reboot required\n"); - break; + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); + if (!i915_reset(dev, GRDOM_RENDER)) { + atomic_set(&dev_priv->mm.wedged, 0); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } } } From 92f49d9cec0052e09d938ac913d8e9ab432a0584 Mon Sep 17 00:00:00 2001 From: "Xiang, Haihao" Date: Thu, 16 Sep 2010 10:43:10 +0800 Subject: [PATCH 175/476] drm/i915: fix HAS_BSD with a device info flag Signed-off-by: Xiang, Haihao Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2ddac06d5967..393696cee86d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -125,6 +125,7 @@ static const struct intel_device_info intel_g33_info = { static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { @@ -132,6 +133,7 @@ static const struct intel_device_info intel_gm45_info = { .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_pineview_info = { @@ -143,11 +145,13 @@ static const struct intel_device_info intel_pineview_info = { static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .is_ironlake = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_ironlake = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { diff --git a/drivers/gpu/drm/i915/i915_drv.h 
b/drivers/gpu/drm/i915/i915_drv.h index b57e049a4623..4b6812015056 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -214,6 +214,7 @@ struct intel_device_info { u8 has_overlay : 1; u8 overlay_needs_physical : 1; u8 supports_tv : 1; + u8 has_bsd_ring : 1; }; enum no_fbc_reason { @@ -1237,7 +1238,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) +#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) From 5c1143bbecf50184ff7cad6287b4e0993bacbd9f Mon Sep 17 00:00:00 2001 From: "Xiang, Haihao" Date: Thu, 16 Sep 2010 10:43:11 +0800 Subject: [PATCH 176/476] drm/i915: do not export the instances of struct intel_ring_buffer Introduce intel_init_render_ring_buffer(), intel_init_bsd_ring_buffer for ring initialization. Signed-off-by: Xiang, Haihao Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 14 ++---------- drivers/gpu/drm/i915/intel_ringbuffer.c | 29 +++++++++++++++++++++++-- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++-- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 325f52bc1401..7b33b4d5ebff 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4496,28 +4496,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; - dev_priv->render_ring = render_ring; - - if (!I915_NEED_GFX_HWS(dev)) { - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; - memset(dev_priv->render_ring.status_page.page_addr, - 0, PAGE_SIZE); - } - if (HAS_PIPE_CONTROL(dev)) { ret = i915_gem_init_pipe_control(dev); if (ret) return ret; } - ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); + ret = intel_init_render_ring_buffer(dev); if (ret) goto cleanup_pipe_control; if (HAS_BSD(dev)) { - dev_priv->bsd_ring = bsd_ring; - ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + ret = intel_init_bsd_ring_buffer(dev); if (ret) goto cleanup_render_ring; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 45f66e289af1..178e2cea9835 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -793,7 +793,7 @@ void intel_fill_struct(struct drm_device *dev, intel_ring_advance(dev, ring); } -struct intel_ring_buffer render_ring = { +static struct intel_ring_buffer render_ring = { .name = "render ring", .id = RING_RENDER, .regs = { @@ -831,7 +831,7 @@ struct intel_ring_buffer render_ring = { /* ring buffer for bit-stream decoder */ -struct intel_ring_buffer bsd_ring = { +static struct intel_ring_buffer bsd_ring = { .name = "bsd ring", .id = RING_BSD, .regs = { @@ -866,3 +866,28 @@ struct intel_ring_buffer bsd_ring = { .status_page = {NULL, 0, NULL}, .map = {0,} }; + +int intel_init_render_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->render_ring = render_ring; + + if (!I915_NEED_GFX_HWS(dev)) { + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; + memset(dev_priv->render_ring.status_page.page_addr, + 0, PAGE_SIZE); + } + + return intel_init_ring_buffer(dev, 
&dev_priv->render_ring); +} + +int intel_init_bsd_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->bsd_ring = bsd_ring; + + return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8dc0e62b7d2a..5603d6e945e9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -133,7 +133,7 @@ void intel_ring_advance(struct drm_device *dev, u32 intel_ring_get_seqno(struct drm_device *dev, struct intel_ring_buffer *ring); -extern struct intel_ring_buffer render_ring; -extern struct intel_ring_buffer bsd_ring; +int intel_init_render_ring_buffer(struct drm_device *dev); +int intel_init_bsd_ring_buffer(struct drm_device *dev); #endif /* _INTEL_RINGBUFFER_H_ */ From d46eefa29724b1be0e8e90a3a51a190b912ebfab Mon Sep 17 00:00:00 2001 From: "Xiang, Haihao" Date: Thu, 16 Sep 2010 10:43:12 +0800 Subject: [PATCH 177/476] drm/i915: add set_tail hook in struct intel_ring_buffer This is prepared for video codec ring buffer on Sandybridge. It is needed to read/write more than one register to move the tail pointer of the video codec ring on Sandybridge. Signed-off-by: Xiang, Haihao Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 22 +++++++++++++++++----- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 ++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 178e2cea9835..7debb1972eb2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -132,6 +132,12 @@ static unsigned int render_ring_get_tail(struct drm_device *dev, return I915_READ(PRB0_TAIL) & TAIL_ADDR; } +static inline void render_ring_set_tail(struct drm_device *dev, u32 value) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(PRB0_TAIL, value); +} + static unsigned int render_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -144,8 +150,7 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, static void render_ring_advance_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(PRB0_TAIL, ring->tail); + render_ring_set_tail(dev, ring->tail); } static int init_ring_common(struct drm_device *dev, @@ -159,7 +164,7 @@ static int init_ring_common(struct drm_device *dev, /* Stop the ring if it's running. */ I915_WRITE(ring->regs.ctl, 0); I915_WRITE(ring->regs.head, 0); - I915_WRITE(ring->regs.tail, 0); + ring->set_tail(dev, 0); /* Initialize the ring. 
*/ I915_WRITE(ring->regs.start, obj_priv->gtt_offset); @@ -400,6 +405,12 @@ static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; } +static inline void bsd_ring_set_tail(struct drm_device *dev, u32 value) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(BSD_RING_TAIL, value); +} + static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -410,8 +421,7 @@ static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, static inline void bsd_ring_advance_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(BSD_RING_TAIL, ring->tail); + bsd_ring_set_tail(dev, ring->tail); } static int init_bsd_ring(struct drm_device *dev, @@ -817,6 +827,7 @@ static struct intel_ring_buffer render_ring = { .init = init_render_ring, .get_head = render_ring_get_head, .get_tail = render_ring_get_tail, + .set_tail = render_ring_set_tail, .get_active_head = render_ring_get_active_head, .advance_ring = render_ring_advance_ring, .flush = render_ring_flush, @@ -855,6 +866,7 @@ static struct intel_ring_buffer bsd_ring = { .init = init_bsd_ring, .get_head = bsd_ring_get_head, .get_tail = bsd_ring_get_tail, + .set_tail = bsd_ring_set_tail, .get_active_head = bsd_ring_get_active_head, .advance_ring = bsd_ring_advance_ring, .flush = bsd_ring_flush, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5603d6e945e9..7bd571c796ae 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -48,6 +48,8 @@ struct intel_ring_buffer { struct intel_ring_buffer *ring); unsigned int (*get_tail)(struct drm_device *dev, struct intel_ring_buffer *ring); + void (*set_tail)(struct drm_device *dev, + u32 value); unsigned int (*get_active_head)(struct drm_device *dev, struct intel_ring_buffer *ring); void (*advance_ring)(struct drm_device *dev, From a3f07cd53e31c1c27364e56266a541b9467c1895 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 14:36:46 +0100 Subject: [PATCH 178/476] drm/i915/ringbuffer: Implement advance using set_tail As noted by Zhenyu, we can now simply replace the existing advance hook by calling the new set_tail function pointer directly. 
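A sketch of the resulting helper as it reads after this patch (set_tail still takes the bare tail value here; a later patch passes the ring as well):

	void intel_ring_advance(struct drm_device *dev,
				struct intel_ring_buffer *ring)
	{
		/* wrap the software tail within the ring size ... */
		ring->tail &= ring->size - 1;
		/* ... and let the per-ring hook write it to the hardware */
		ring->set_tail(dev, ring->tail);
	}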
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 16 +--------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 -- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7debb1972eb2..3f80f18e2844 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -147,12 +147,6 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, return I915_READ(acthd_reg); } -static void render_ring_advance_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - render_ring_set_tail(dev, ring->tail); -} - static int init_ring_common(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -418,12 +412,6 @@ static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, return I915_READ(BSD_RING_ACTHD); } -static inline void bsd_ring_advance_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - bsd_ring_set_tail(dev, ring->tail); -} - static int init_bsd_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -785,7 +773,7 @@ void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->advance_ring(dev, ring); + ring->set_tail(dev, ring->tail); } void intel_fill_struct(struct drm_device *dev, @@ -829,7 +817,6 @@ static struct intel_ring_buffer render_ring = { .get_tail = render_ring_get_tail, .set_tail = render_ring_set_tail, .get_active_head = render_ring_get_active_head, - .advance_ring = render_ring_advance_ring, .flush = render_ring_flush, .add_request = render_ring_add_request, .get_gem_seqno = render_ring_get_gem_seqno, @@ -868,7 +855,6 @@ static struct intel_ring_buffer bsd_ring = { .get_tail = bsd_ring_get_tail, .set_tail = bsd_ring_set_tail, .get_active_head = bsd_ring_get_active_head, - .advance_ring = bsd_ring_advance_ring, .flush = bsd_ring_flush, .add_request = bsd_ring_add_request, .get_gem_seqno = bsd_ring_get_gem_seqno, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 7bd571c796ae..be1fd9b37088 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -52,8 +52,6 @@ struct intel_ring_buffer { u32 value); unsigned int (*get_active_head)(struct drm_device *dev, struct intel_ring_buffer *ring); - void (*advance_ring)(struct drm_device *dev, - struct intel_ring_buffer *ring); void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, From 881f47b64723f4d697084533491a489e3e74b10f Mon Sep 17 00:00:00 2001 From: "Xiang, Haihao" Date: Sun, 19 Sep 2010 14:40:43 +0100 Subject: [PATCH 179/476] drm/i915: add a new BSD ring buffer for Sandybridge This ring buffer is used for video decoding/encoding on Sandybridge. 
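The interesting part is the tail write. In outline (a sketch of gen6_bsd_ring_set_tail() from the diff below), every tail move is bracketed by disabling and re-enabling the ring's power-save messaging, with a wait for the idle indicator in between:

	/* disable the sleep/PSMI messaging before touching the tail */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	/* wait until the ring reports idle */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	/* only now move the tail, then re-enable the messaging */
	I915_WRITE(GEN6_BSD_RING_TAIL, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);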
Signed-off-by: Xiang, Haihao Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_irq.c | 15 ++- drivers/gpu/drm/i915/i915_reg.h | 26 ++++- drivers/gpu/drm/i915/intel_ringbuffer.c | 124 +++++++++++++++++++++++- 4 files changed, 159 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 393696cee86d..2c87f9b97b6f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -157,11 +157,13 @@ static const struct intel_device_info intel_ironlake_m_info = { static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b1e7655288d8..d4c053e1c376 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -300,6 +300,10 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) u32 de_iir, gt_iir, de_ier, pch_iir; struct drm_i915_master_private *master_priv; struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; + + if (IS_GEN6(dev)) + bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); @@ -331,10 +335,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } - if (gt_iir & GT_BSD_USER_INTERRUPT) + if (gt_iir & bsd_usr_interrupt) DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); - if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -1436,17 +1439,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); (void) I915_READ(DEIER); - /* Gen6 only needs render pipe_control now */ if (IS_GEN6(dev)) - render_mask = GT_PIPE_NOTIFY; + render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT; dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - if (IS_GEN6(dev)) + if (IS_GEN6(dev)) { I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); + I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + } + I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); (void) I915_READ(GTIER); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b46e580421e1..8d51de0e01f2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -197,11 +197,11 @@ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) +#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) - /* * 3D instructions used by the kernel */ @@ -483,6 +483,28 @@ #define BSD_RING_ACTHD 0x04074 #define BSD_HWS_PGA 0x04080 +/* + * video command stream instruction and interrupt control register defines + * for GEN6 + */ +#define GEN6_BSD_RING_TAIL 0x12030 +#define GEN6_BSD_RING_HEAD 0x12034 +#define 
GEN6_BSD_RING_START 0x12038 +#define GEN6_BSD_RING_CTL 0x1203c +#define GEN6_BSD_RING_ACTHD 0x12074 +#define GEN6_BSD_HWS_PGA 0x14080 + +#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 +#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) + +#define GEN6_BSD_IMR 0x120a8 +#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) + +#define GEN6_BSD_RNCID 0x12198 + /* * Framebuffer compression (915+ only) */ @@ -2598,7 +2620,7 @@ #define GT_SYNC_STATUS (1 << 2) #define GT_USER_INTERRUPT (1 << 0) #define GT_BSD_USER_INTERRUPT (1 << 5) - +#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) #define GTISR 0x44010 #define GTIMR 0x44014 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 3f80f18e2844..478406d1886c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_drm.h" #include "i915_trace.h" +#include "intel_drv.h" static u32 i915_gem_get_seqno(struct drm_device *dev) { @@ -865,6 +866,124 @@ static struct intel_ring_buffer bsd_ring = { .map = {0,} }; + +static void gen6_bsd_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr); + I915_READ(GEN6_BSD_HWS_PGA); +} + +static inline unsigned int gen6_bsd_ring_get_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(GEN6_BSD_RING_HEAD) & HEAD_ADDR; +} + +static inline unsigned int gen6_bsd_ring_get_tail(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(GEN6_BSD_RING_TAIL) & TAIL_ADDR; +} + +static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, + u32 value) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* Every tail move must follow the sequence below */ + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); + I915_WRITE(GEN6_BSD_RNCID, 0x0); + + if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & + GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, + 50)) + DRM_ERROR("timed out waiting for IDLE Indicator\n"); + + I915_WRITE(GEN6_BSD_RING_TAIL, value); + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); +} + +static inline unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(GEN6_BSD_RING_ACTHD); +} + +static void gen6_bsd_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + intel_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_FLUSH_DW); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_advance(dev, ring); +} + +static int +gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) +{ + uint32_t 
exec_start; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(dev, ring, exec_start); + intel_ring_advance(dev, ring); + return 0; +} + +/* ring buffer for Video Codec for Gen6+ */ +static struct intel_ring_buffer gen6_bsd_ring = { + .name = "gen6 bsd ring", + .id = RING_BSD, + .regs = { + .ctl = GEN6_BSD_RING_CTL, + .head = GEN6_BSD_RING_HEAD, + .tail = GEN6_BSD_RING_TAIL, + .start = GEN6_BSD_RING_START + }, + .size = 32 * PAGE_SIZE, + .alignment = PAGE_SIZE, + .virtual_start = NULL, + .dev = NULL, + .gem_object = NULL, + .head = 0, + .tail = 0, + .space = 0, + .user_irq_refcount = 0, + .irq_gem_seqno = 0, + .waiting_gem_seqno = 0, + .setup_status_page = gen6_bsd_setup_status_page, + .init = init_bsd_ring, + .get_head = gen6_bsd_ring_get_head, + .get_tail = gen6_bsd_ring_get_tail, + .set_tail = gen6_bsd_ring_set_tail, + .get_active_head = gen6_bsd_ring_get_active_head, + .flush = gen6_bsd_ring_flush, + .add_request = bsd_ring_add_request, + .get_gem_seqno = bsd_ring_get_gem_seqno, + .user_irq_get = bsd_ring_get_user_irq, + .user_irq_put = bsd_ring_put_user_irq, + .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, + .status_page = {NULL, 0, NULL}, + .map = {0,} +}; + int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -885,7 +1004,10 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - dev_priv->bsd_ring = bsd_ring; + if (IS_GEN6(dev)) + dev_priv->bsd_ring = gen6_bsd_ring; + else + dev_priv->bsd_ring = bsd_ring; return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); } From e070868ef2101ce548d4fbb25edfd301e59fb719 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 14:46:27 +0100 Subject: [PATCH 180/476] drm/i915/ringbuffer: Mark the initialisation structs as constant. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 478406d1886c..f0b44d3ba958 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -792,7 +792,7 @@ void intel_fill_struct(struct drm_device *dev, intel_ring_advance(dev, ring); } -static struct intel_ring_buffer render_ring = { +static const struct intel_ring_buffer render_ring = { .name = "render ring", .id = RING_RENDER, .regs = { @@ -830,7 +830,7 @@ static struct intel_ring_buffer render_ring = { /* ring buffer for bit-stream decoder */ -static struct intel_ring_buffer bsd_ring = { +static const struct intel_ring_buffer bsd_ring = { .name = "bsd ring", .id = RING_BSD, .regs = { @@ -948,7 +948,7 @@ gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, } /* ring buffer for Video Codec for Gen6+ */ -static struct intel_ring_buffer gen6_bsd_ring = { +static const struct intel_ring_buffer gen6_bsd_ring = { .name = "gen6 bsd ring", .id = RING_BSD, .regs = { From c7f9f9a8b89bb4d53edc030f5b61ae11d6859721 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 15:05:13 +0100 Subject: [PATCH 181/476] drm/i915: Use ring->flush() instead of MI_FLUSH Use the ring abstraction to hide the details of having choose the appropriate flushing method. 
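In sketch form: i915_gem_flush_ring() loses its static qualifier and is declared in i915_drv.h, so the page-flip path can ask the object's own ring to flush instead of emitting a raw MI_FLUSH on the render ring, roughly as in the diff below:

	/* schedule the pipelined flush on the ring that owns the bo */
	if (was_dirty)
		i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty);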
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 33 ++++++++++++++-------------- 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4b6812015056..790ffec135df 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1001,6 +1001,10 @@ void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset_flushing_list(struct drm_device *dev); void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); +void i915_gem_flush_ring(struct drm_device *dev, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7b33b4d5ebff..b242530ffcbd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1908,7 +1908,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, return i915_do_wait_request(dev, seqno, 1, ring); } -static void +void i915_gem_flush_ring(struct drm_device *dev, struct intel_ring_buffer *ring, uint32_t invalidate_domains, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c3f0400963de..0505ddb76a10 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5056,24 +5056,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; - if (was_dirty || IS_GEN3(dev) || IS_GEN2(dev)) { + /* Schedule the pipelined flush */ + if (was_dirty) + i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty); + + if (IS_GEN3(dev) || IS_GEN2(dev)) { + u32 flip_mask; + + /* Can't queue multiple flips, so wait for the previous + * one to finish before executing the next. + */ BEGIN_LP_RING(2); - if (IS_GEN3(dev) || IS_GEN2(dev)) { - u32 flip_mask; - - /* Can't queue multiple flips, so wait for the previous - * one to finish before executing the next. - */ - - if (intel_crtc->plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); - } else - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + OUT_RING(MI_NOOP); ADVANCE_LP_RING(); } From b8aea0c8003927f13e257c7ff370b6b73dbe2a5a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 15:28:30 +0200 Subject: [PATCH 182/476] drm/i915: kill duplicated/unneeded register defines This looks like a copy-paste remnant from the i810. All the regs that are actually used are already defined somewhere else in i915_reg.h! 
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8d51de0e01f2..8b8ac60cc839 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -709,24 +709,6 @@ #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) -#define RING_TAIL 0x00 -#define TAIL_ADDR 0x001FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0xFFFFF000 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 - /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 From 333e9fe94d00ce8c334d91099449b9948bf76b92 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 16:24:01 +0200 Subject: [PATCH 183/476] drm/i915: add relative ring register macros Documentation explicitly mentions that the ring registers are designed to have the same offsets relative to a base registers. Use this to fight the code beaurocratic in intel_ringbuffer.c. No code changes in this patch, just the new definitions. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 8 ++++++++ drivers/gpu/drm/i915/intel_ringbuffer.c | 3 +++ drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 3 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8b8ac60cc839..77c9191f3fd6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -288,6 +288,14 @@ #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 #define PRB0_CTL 0x0203c +#define RENDER_RING_BASE 0x02000 +#define BSD_RING_BASE 0x04000 +#define GEN6_BSD_RING_BASE 0x12000 +#define RING_TAIL(base) (base)+0x30 +#define RING_HEAD(base) (base)+0x34 +#define RING_START(base) (base)+0x38 +#define RING_CTL(base) (base)+0x3c +#define RING_HWS_PGA(base) (base)+0x80 #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f0b44d3ba958..c1517b2d2f08 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -801,6 +801,7 @@ static const struct intel_ring_buffer render_ring = { .tail = PRB0_TAIL, .start = PRB0_START }, + .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, .virtual_start = NULL, @@ -839,6 +840,7 @@ static const struct intel_ring_buffer bsd_ring = { .tail = BSD_RING_TAIL, .start = BSD_RING_START }, + .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, .virtual_start = NULL, @@ -957,6 +959,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .tail = GEN6_BSD_RING_TAIL, .start = GEN6_BSD_RING_START }, + .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, .virtual_start = NULL, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index be1fd9b37088..3917d8b1c1a9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -20,6 
+20,7 @@ struct intel_ring_buffer { u32 tail; u32 start; } regs; + u32 mmio_base; unsigned long size; unsigned int alignment; void *virtual_start; From 870e86ddc2d110124812b277643ed0f2767148ee Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 16:29:44 +0200 Subject: [PATCH 184/476] drm/i915: use new macros to access the ring tail register Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 63 +++++++------------------ drivers/gpu/drm/i915/intel_ringbuffer.h | 9 ++-- 2 files changed, 22 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c1517b2d2f08..4b797e7dc95d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -126,17 +126,12 @@ static unsigned int render_ring_get_head(struct drm_device *dev, return I915_READ(PRB0_HEAD) & HEAD_ADDR; } -static unsigned int render_ring_get_tail(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void ring_set_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(PRB0_TAIL) & TAIL_ADDR; -} - -static inline void render_ring_set_tail(struct drm_device *dev, u32 value) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(PRB0_TAIL, value); + I915_WRITE_TAIL(ring, ring->tail); } static unsigned int render_ring_get_active_head(struct drm_device *dev, @@ -159,7 +154,7 @@ static int init_ring_common(struct drm_device *dev, /* Stop the ring if it's running. */ I915_WRITE(ring->regs.ctl, 0); I915_WRITE(ring->regs.head, 0); - ring->set_tail(dev, 0); + ring->set_tail(dev, ring, 0); /* Initialize the ring. */ I915_WRITE(ring->regs.start, obj_priv->gtt_offset); @@ -172,7 +167,7 @@ static int init_ring_common(struct drm_device *dev, ring->name, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), + I915_READ_TAIL(ring), I915_READ(ring->regs.start)); I915_WRITE(ring->regs.head, 0); @@ -182,7 +177,7 @@ static int init_ring_common(struct drm_device *dev, ring->name, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), + I915_READ_TAIL(ring), I915_READ(ring->regs.start)); } @@ -198,7 +193,7 @@ static int init_ring_common(struct drm_device *dev, ring->name, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), + I915_READ_TAIL(ring), I915_READ(ring->regs.start)); return -EIO; } @@ -207,7 +202,7 @@ static int init_ring_common(struct drm_device *dev, i915_kernel_lost_context(dev); else { ring->head = ring->get_head(dev, ring); - ring->tail = ring->get_tail(dev, ring); + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; @@ -393,19 +388,6 @@ static inline unsigned int bsd_ring_get_head(struct drm_device *dev, return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; } -static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; -} - -static inline void bsd_ring_set_tail(struct drm_device *dev, u32 value) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(BSD_RING_TAIL, value); -} - static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -620,6 +602,7 @@ err: int 
intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; struct drm_gem_object *obj; int ret; @@ -668,7 +651,7 @@ int intel_init_ring_buffer(struct drm_device *dev, i915_kernel_lost_context(dev); else { ring->head = ring->get_head(dev, ring); - ring->tail = ring->get_tail(dev, ring); + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; @@ -774,7 +757,7 @@ void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->set_tail(dev, ring->tail); + ring->set_tail(dev, ring, ring->tail); } void intel_fill_struct(struct drm_device *dev, @@ -798,7 +781,6 @@ static const struct intel_ring_buffer render_ring = { .regs = { .ctl = PRB0_CTL, .head = PRB0_HEAD, - .tail = PRB0_TAIL, .start = PRB0_START }, .mmio_base = RENDER_RING_BASE, @@ -816,8 +798,7 @@ static const struct intel_ring_buffer render_ring = { .setup_status_page = render_setup_status_page, .init = init_render_ring, .get_head = render_ring_get_head, - .get_tail = render_ring_get_tail, - .set_tail = render_ring_set_tail, + .set_tail = ring_set_tail, .get_active_head = render_ring_get_active_head, .flush = render_ring_flush, .add_request = render_ring_add_request, @@ -837,7 +818,6 @@ static const struct intel_ring_buffer bsd_ring = { .regs = { .ctl = BSD_RING_CTL, .head = BSD_RING_HEAD, - .tail = BSD_RING_TAIL, .start = BSD_RING_START }, .mmio_base = BSD_RING_BASE, @@ -855,8 +835,7 @@ static const struct intel_ring_buffer bsd_ring = { .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, .get_head = bsd_ring_get_head, - .get_tail = bsd_ring_get_tail, - .set_tail = bsd_ring_set_tail, + .set_tail = ring_set_tail, .get_active_head = bsd_ring_get_active_head, .flush = bsd_ring_flush, .add_request = bsd_ring_add_request, @@ -884,15 +863,9 @@ static inline unsigned int gen6_bsd_ring_get_head(struct drm_device *dev, return I915_READ(GEN6_BSD_RING_HEAD) & HEAD_ADDR; } -static inline unsigned int gen6_bsd_ring_get_tail(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(GEN6_BSD_RING_TAIL) & TAIL_ADDR; -} - static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, - u32 value) + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -907,7 +880,7 @@ static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, 50)) DRM_ERROR("timed out waiting for IDLE Indicator\n"); - I915_WRITE(GEN6_BSD_RING_TAIL, value); + I915_WRITE_TAIL(ring, value); I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); @@ -956,7 +929,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .regs = { .ctl = GEN6_BSD_RING_CTL, .head = GEN6_BSD_RING_HEAD, - .tail = GEN6_BSD_RING_TAIL, .start = GEN6_BSD_RING_START }, .mmio_base = GEN6_BSD_RING_BASE, @@ -974,7 +946,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, .get_head = gen6_bsd_ring_get_head, - .get_tail = gen6_bsd_ring_get_tail, .set_tail = gen6_bsd_ring_set_tail, .get_active_head = gen6_bsd_ring_get_active_head, .flush = gen6_bsd_ring_flush, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h 
index 3917d8b1c1a9..2dfcd9bad3ce 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -7,6 +7,9 @@ struct intel_hw_status_page { struct drm_gem_object *obj; }; +#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) +#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) + struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { const char *name; @@ -17,7 +20,6 @@ struct intel_ring_buffer { struct ring_regs { u32 ctl; u32 head; - u32 tail; u32 start; } regs; u32 mmio_base; @@ -47,10 +49,9 @@ struct intel_ring_buffer { unsigned int (*get_head)(struct drm_device *dev, struct intel_ring_buffer *ring); - unsigned int (*get_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring); void (*set_tail)(struct drm_device *dev, - u32 value); + struct intel_ring_buffer *ring, + u32 value); unsigned int (*get_active_head)(struct drm_device *dev, struct intel_ring_buffer *ring); void (*flush)(struct drm_device *dev, From 6c0e1c556ee659cd8c976cd175c0b70e209acb92 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 16:33:33 +0200 Subject: [PATCH 185/476] drm/i915: use new macros to access the ring start register Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 11 ++++------- drivers/gpu/drm/i915/intel_ringbuffer.h | 3 ++- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 4b797e7dc95d..395c4d34b1e2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -157,7 +157,7 @@ static int init_ring_common(struct drm_device *dev, ring->set_tail(dev, ring, 0); /* Initialize the ring. 
*/ - I915_WRITE(ring->regs.start, obj_priv->gtt_offset); + I915_WRITE_START(ring, obj_priv->gtt_offset); head = ring->get_head(dev, ring); /* G45 ring initialization fails to reset head to zero */ @@ -168,7 +168,7 @@ static int init_ring_common(struct drm_device *dev, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), I915_READ_TAIL(ring), - I915_READ(ring->regs.start)); + I915_READ_START(ring)); I915_WRITE(ring->regs.head, 0); @@ -178,7 +178,7 @@ static int init_ring_common(struct drm_device *dev, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), I915_READ_TAIL(ring), - I915_READ(ring->regs.start)); + I915_READ_START(ring)); } I915_WRITE(ring->regs.ctl, @@ -194,7 +194,7 @@ static int init_ring_common(struct drm_device *dev, I915_READ(ring->regs.ctl), I915_READ(ring->regs.head), I915_READ_TAIL(ring), - I915_READ(ring->regs.start)); + I915_READ_START(ring)); return -EIO; } @@ -781,7 +781,6 @@ static const struct intel_ring_buffer render_ring = { .regs = { .ctl = PRB0_CTL, .head = PRB0_HEAD, - .start = PRB0_START }, .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, @@ -818,7 +817,6 @@ static const struct intel_ring_buffer bsd_ring = { .regs = { .ctl = BSD_RING_CTL, .head = BSD_RING_HEAD, - .start = BSD_RING_START }, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, @@ -929,7 +927,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .regs = { .ctl = GEN6_BSD_RING_CTL, .head = GEN6_BSD_RING_HEAD, - .start = GEN6_BSD_RING_START }, .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2dfcd9bad3ce..9d0ae5ad7e55 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -9,6 +9,8 @@ struct intel_hw_status_page { #define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) +#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) +#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { @@ -20,7 +22,6 @@ struct intel_ring_buffer { struct ring_regs { u32 ctl; u32 head; - u32 start; } regs; u32 mmio_base; unsigned long size; From 570ef608591aa1c7f7cb615c2d33b30246179da1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 17:06:23 +0200 Subject: [PATCH 186/476] drm/i915: use new macros to access the ring head register Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 48 ++++++------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 ++- 2 files changed, 13 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 395c4d34b1e2..7eb936a315bd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -119,13 +119,6 @@ render_ring_flush(struct drm_device *dev, } } -static unsigned int render_ring_get_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(PRB0_HEAD) & HEAD_ADDR; -} - static void ring_set_tail(struct drm_device *dev, struct intel_ring_buffer *ring, u32 value) @@ -153,12 +146,12 @@ static int init_ring_common(struct drm_device *dev, /* Stop the ring if it's running. 
*/ I915_WRITE(ring->regs.ctl, 0); - I915_WRITE(ring->regs.head, 0); + I915_WRITE_HEAD(ring, 0); ring->set_tail(dev, ring, 0); /* Initialize the ring. */ I915_WRITE_START(ring, obj_priv->gtt_offset); - head = ring->get_head(dev, ring); + head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ if (head != 0) { @@ -166,17 +159,17 @@ static int init_ring_common(struct drm_device *dev, "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), + I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); - I915_WRITE(ring->regs.head, 0); + I915_WRITE_HEAD(ring, 0); DRM_ERROR("%s head forced to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), + I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); } @@ -185,14 +178,14 @@ static int init_ring_common(struct drm_device *dev, ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | RING_NO_REPORT | RING_VALID); - head = I915_READ(ring->regs.head) & HEAD_ADDR; + head = I915_READ_HEAD(ring) & HEAD_ADDR; /* If the head is still not zero, the ring is dead */ if (head != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), + I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); return -EIO; @@ -201,7 +194,7 @@ static int init_ring_common(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); else { - ring->head = ring->get_head(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) @@ -381,13 +374,6 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_advance(dev, ring); } -static inline unsigned int bsd_ring_get_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; -} - static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -650,7 +636,7 @@ int intel_init_ring_buffer(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); else { - ring->head = ring->get_head(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) @@ -716,11 +702,12 @@ int intel_wait_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring, int n) { unsigned long end; + drm_i915_private_t *dev_priv = dev->dev_private; trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; do { - ring->head = ring->get_head(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; @@ -780,7 +767,6 @@ static const struct intel_ring_buffer render_ring = { .id = RING_RENDER, .regs = { .ctl = PRB0_CTL, - .head = PRB0_HEAD, }, .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, @@ -796,7 +782,6 @@ static const struct intel_ring_buffer render_ring = { .waiting_gem_seqno = 0, .setup_status_page = render_setup_status_page, .init = init_render_ring, - .get_head = render_ring_get_head, .set_tail = ring_set_tail, .get_active_head = render_ring_get_active_head, .flush = render_ring_flush, @@ -816,7 +801,6 @@ 
static const struct intel_ring_buffer bsd_ring = { .id = RING_BSD, .regs = { .ctl = BSD_RING_CTL, - .head = BSD_RING_HEAD, }, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, @@ -832,7 +816,6 @@ static const struct intel_ring_buffer bsd_ring = { .waiting_gem_seqno = 0, .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, - .get_head = bsd_ring_get_head, .set_tail = ring_set_tail, .get_active_head = bsd_ring_get_active_head, .flush = bsd_ring_flush, @@ -854,13 +837,6 @@ static void gen6_bsd_setup_status_page(struct drm_device *dev, I915_READ(GEN6_BSD_HWS_PGA); } -static inline unsigned int gen6_bsd_ring_get_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(GEN6_BSD_RING_HEAD) & HEAD_ADDR; -} - static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, struct intel_ring_buffer *ring, u32 value) @@ -926,7 +902,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .id = RING_BSD, .regs = { .ctl = GEN6_BSD_RING_CTL, - .head = GEN6_BSD_RING_HEAD, }, .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, @@ -942,7 +917,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .waiting_gem_seqno = 0, .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, - .get_head = gen6_bsd_ring_get_head, .set_tail = gen6_bsd_ring_set_tail, .get_active_head = gen6_bsd_ring_get_active_head, .flush = gen6_bsd_ring_flush, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9d0ae5ad7e55..af09eaa84bed 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -11,6 +11,8 @@ struct intel_hw_status_page { #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) #define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) +#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) +#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { @@ -21,7 +23,6 @@ struct intel_ring_buffer { } id; struct ring_regs { u32 ctl; - u32 head; } regs; u32 mmio_base; unsigned long size; @@ -48,8 +49,6 @@ struct intel_ring_buffer { int (*init)(struct drm_device *dev, struct intel_ring_buffer *ring); - unsigned int (*get_head)(struct drm_device *dev, - struct intel_ring_buffer *ring); void (*set_tail)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 value); From 7f2ab69913135f0377a1dfc1da5695b64107d3ca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 17:06:59 +0200 Subject: [PATCH 187/476] drm/i915: use new macros to access the ring ctl register Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 19 +++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 ++--- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7eb936a315bd..1198b6097be0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -145,7 +145,7 @@ static int init_ring_common(struct drm_device *dev, obj_priv = to_intel_bo(ring->gem_object); /* Stop the ring if it's running. 
*/ - I915_WRITE(ring->regs.ctl, 0); + I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->set_tail(dev, ring, 0); @@ -158,7 +158,7 @@ static int init_ring_common(struct drm_device *dev, DRM_ERROR("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), + I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); @@ -168,13 +168,13 @@ static int init_ring_common(struct drm_device *dev, DRM_ERROR("%s head forced to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), + I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); } - I915_WRITE(ring->regs.ctl, + I915_WRITE_CTL(ring, ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | RING_NO_REPORT | RING_VALID); @@ -184,7 +184,7 @@ static int init_ring_common(struct drm_device *dev, DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), + I915_READ_CTL(ring), I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); @@ -765,9 +765,6 @@ void intel_fill_struct(struct drm_device *dev, static const struct intel_ring_buffer render_ring = { .name = "render ring", .id = RING_RENDER, - .regs = { - .ctl = PRB0_CTL, - }, .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, @@ -799,9 +796,6 @@ static const struct intel_ring_buffer render_ring = { static const struct intel_ring_buffer bsd_ring = { .name = "bsd ring", .id = RING_BSD, - .regs = { - .ctl = BSD_RING_CTL, - }, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, @@ -900,9 +894,6 @@ gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, static const struct intel_ring_buffer gen6_bsd_ring = { .name = "gen6 bsd ring", .id = RING_BSD, - .regs = { - .ctl = GEN6_BSD_RING_CTL, - }, .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index af09eaa84bed..1668cd9ac876 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -13,6 +13,8 @@ struct intel_hw_status_page { #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) #define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) +#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) +#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { @@ -21,9 +23,6 @@ struct intel_ring_buffer { RING_RENDER = 0x1, RING_BSD = 0x2, } id; - struct ring_regs { - u32 ctl; - } regs; u32 mmio_base; unsigned long size; unsigned int alignment; From fa7ed4d206890fd325eddcc8d27d6d1e89c5d4bd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 17:08:41 +0200 Subject: [PATCH 188/476] drm/i915: don't explicitly initialize ringbuffer members to zero The compiler happily does that for us. 
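For reference, the guarantee being relied on here is the C initialization rule: when an object with static storage duration is given a partial (designated) initializer, every member that is not named is zero-initialized (C99 6.7.8), so the explicit NULL/0 assignments removed in the diff below were redundant. A minimal, self-contained userspace sketch of the rule; the struct and its fields are invented for illustration and are not taken from the driver:

#include <stdio.h>

/* Illustrative stand-in for a driver descriptor; the field names are made up. */
struct ring_desc {
	const char *name;
	unsigned long size;
	void *virtual_start;
	int head;
	int tail;
};

/* Only .name and .size are spelled out; every other member is guaranteed
 * to be zero/NULL because the object has static storage duration. */
static const struct ring_desc example_ring = {
	.name = "example ring",
	.size = 32 * 4096,
};

int main(void)
{
	printf("%s: size=%lu virt=%p head=%d tail=%d\n",
	       example_ring.name, example_ring.size,
	       example_ring.virtual_start, example_ring.head, example_ring.tail);
	return 0;
}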
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 33 ------------------------- 1 file changed, 33 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1198b6097be0..d395d055328a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -768,15 +768,6 @@ static const struct intel_ring_buffer render_ring = { .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, - .virtual_start = NULL, - .dev = NULL, - .gem_object = NULL, - .head = 0, - .tail = 0, - .space = 0, - .user_irq_refcount = 0, - .irq_gem_seqno = 0, - .waiting_gem_seqno = 0, .setup_status_page = render_setup_status_page, .init = init_render_ring, .set_tail = ring_set_tail, @@ -787,8 +778,6 @@ static const struct intel_ring_buffer render_ring = { .user_irq_get = render_ring_get_user_irq, .user_irq_put = render_ring_put_user_irq, .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, - .status_page = {NULL, 0, NULL}, - .map = {0,} }; /* ring buffer for bit-stream decoder */ @@ -799,15 +788,6 @@ static const struct intel_ring_buffer bsd_ring = { .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, - .virtual_start = NULL, - .dev = NULL, - .gem_object = NULL, - .head = 0, - .tail = 0, - .space = 0, - .user_irq_refcount = 0, - .irq_gem_seqno = 0, - .waiting_gem_seqno = 0, .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, .set_tail = ring_set_tail, @@ -818,8 +798,6 @@ static const struct intel_ring_buffer bsd_ring = { .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, - .status_page = {NULL, 0, NULL}, - .map = {0,} }; @@ -897,15 +875,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, .alignment = PAGE_SIZE, - .virtual_start = NULL, - .dev = NULL, - .gem_object = NULL, - .head = 0, - .tail = 0, - .space = 0, - .user_irq_refcount = 0, - .irq_gem_seqno = 0, - .waiting_gem_seqno = 0, .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, .set_tail = gen6_bsd_ring_set_tail, @@ -916,8 +885,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, - .status_page = {NULL, 0, NULL}, - .map = {0,} }; int intel_init_render_ring_buffer(struct drm_device *dev) From a9db5c8fdd8c6e6e966897e05e2c2acd99bcdb6e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Aug 2010 17:22:48 +0200 Subject: [PATCH 189/476] drm/i915: drop alignment ringbuffer parameter Always PAGE_SIZE and only complicates the code. 
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 +---- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d395d055328a..c9894c2bcd65 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -610,7 +610,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->gem_object = obj; - ret = i915_gem_object_pin(obj, ring->alignment); + ret = i915_gem_object_pin(obj, PAGE_SIZE); if (ret) goto err_unref; @@ -767,7 +767,6 @@ static const struct intel_ring_buffer render_ring = { .id = RING_RENDER, .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, - .alignment = PAGE_SIZE, .setup_status_page = render_setup_status_page, .init = init_render_ring, .set_tail = ring_set_tail, @@ -787,7 +786,6 @@ static const struct intel_ring_buffer bsd_ring = { .id = RING_BSD, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .alignment = PAGE_SIZE, .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, .set_tail = ring_set_tail, @@ -874,7 +872,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .id = RING_BSD, .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .alignment = PAGE_SIZE, .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, .set_tail = gen6_bsd_ring_set_tail, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 1668cd9ac876..abf8a4e25a2d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -25,7 +25,6 @@ struct intel_ring_buffer { } id; u32 mmio_base; unsigned long size; - unsigned int alignment; void *virtual_start; struct drm_device *dev; struct drm_gem_object *gem_object; From ab6f8e325083f138ce5da8417baf48887d62da3c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 17:53:44 +0100 Subject: [PATCH 190/476] drm/i915/ringbuffer: whitespace cleanup Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 123 ++++++++++++------------ drivers/gpu/drm/i915/intel_ringbuffer.h | 10 +- 2 files changed, 68 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c9894c2bcd65..1bcea7c85238 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -50,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) static void render_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; u32 cmd; @@ -128,7 +128,7 @@ static void ring_set_tail(struct drm_device *dev, } static unsigned int render_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; u32 acthd_reg = INTEL_INFO(dev)->gen ? 
ACTHD_I965 : ACTHD; @@ -137,7 +137,7 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, } static int init_ring_common(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { u32 head; drm_i915_private_t *dev_priv = dev->dev_private; @@ -204,7 +204,7 @@ static int init_ring_common(struct drm_device *dev, } static int init_render_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; int ret = init_ring_common(dev, ring); @@ -238,9 +238,9 @@ do { \ */ static u32 render_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_file *file_priv, - u32 flush_domains) + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; u32 seqno; @@ -304,7 +304,7 @@ render_ring_add_request(struct drm_device *dev, static u32 render_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (HAS_PIPE_CONTROL(dev)) @@ -315,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev, static void render_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -332,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev, static void render_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -349,7 +349,7 @@ render_ring_put_user_irq(struct drm_device *dev, } static void render_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; if (IS_GEN6(dev)) { @@ -362,7 +362,7 @@ static void render_setup_status_page(struct drm_device *dev, } -void +static void bsd_ring_flush(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, @@ -374,24 +374,24 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_advance(dev, ring); } -static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +static unsigned int bsd_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; return I915_READ(BSD_RING_ACTHD); } static int init_bsd_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { return init_ring_common(dev, ring); } static u32 bsd_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_file *file_priv, - u32 flush_domains) + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains) { u32 seqno; @@ -411,7 +411,7 @@ bsd_ring_add_request(struct drm_device *dev, } static void bsd_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); @@ -420,30 +420,30 @@ static void bsd_setup_status_page(struct drm_device *dev, static void bsd_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) 
{ /* do nothing */ } static void bsd_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { /* do nothing */ } static u32 bsd_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static int bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; @@ -458,10 +458,10 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, static int render_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; @@ -520,7 +520,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, } static void cleanup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; @@ -540,7 +540,7 @@ static void cleanup_status_page(struct drm_device *dev, } static int init_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; @@ -584,9 +584,8 @@ err: return ret; } - int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; @@ -659,7 +658,7 @@ err_hws: } void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { if (ring->gem_object == NULL) return; @@ -672,8 +671,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev, cleanup_status_page(dev, ring); } -int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { unsigned int *virt; int rem; @@ -699,7 +698,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev, } int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n) + struct intel_ring_buffer *ring, int n) { unsigned long end; drm_i915_private_t *dev_priv = dev->dev_private; @@ -729,7 +728,8 @@ int intel_wait_ring_buffer(struct drm_device *dev, } void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int num_dwords) + struct intel_ring_buffer *ring, + int num_dwords) { int n = 4*num_dwords; if (unlikely(ring->tail + n > ring->size)) @@ -741,16 +741,16 @@ void intel_ring_begin(struct drm_device *dev, } void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; ring->set_tail(dev, ring, ring->tail); } void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len) + struct 
intel_ring_buffer *ring, + void *data, + unsigned int len) { unsigned int *virt = ring->virtual_start + ring->tail; BUG_ON((len&~(4-1)) != 0); @@ -800,16 +800,16 @@ static const struct intel_ring_buffer bsd_ring = { static void gen6_bsd_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr); I915_READ(GEN6_BSD_HWS_PGA); } -static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value) +static void gen6_bsd_ring_set_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -830,17 +830,17 @@ static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static inline unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; return I915_READ(GEN6_BSD_RING_ACTHD); } static void gen6_bsd_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { intel_ring_begin(dev, ring, 4); intel_ring_emit(dev, ring, MI_FLUSH_DW); @@ -852,17 +852,22 @@ static void gen6_bsd_ring_flush(struct drm_device *dev, static int gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(dev, ring, + MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); + /* bit0-7 is the length on GEN6+ */ intel_ring_emit(dev, ring, exec_start); intel_ring_advance(dev, ring); + return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index abf8a4e25a2d..2d3165fc1475 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -104,15 +104,13 @@ intel_read_status_page(struct intel_ring_buffer *ring, } int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring); void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring); int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); -int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring, int n); void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); + struct intel_ring_buffer *ring, int n); static inline void intel_ring_emit(struct drm_device *dev, struct intel_ring_buffer *ring, From 53640e1d07fb7dd5d14300dd94f4718eca33348e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Sep 2010 11:40:50 +0100 Subject: [PATCH 191/476] drm/i915: Track gpu fence usage 
Track if the gpu requires the fence for the execution of a batch buffer and so only wait upon the retirement of the object's last rendering seqno if the fence is in use by the GPU. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 790ffec135df..6e22be4f3585 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -128,6 +128,7 @@ struct drm_i915_master_private { struct drm_i915_fence_reg { struct drm_gem_object *obj; struct list_head lru_list; + bool gpu; }; struct sdvo_device_mapping { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b242530ffcbd..a5d5751bad30 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2461,7 +2461,9 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, bool interruptible) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_fence_reg *reg; if (obj_priv->fence_reg == I915_FENCE_REG_NONE) return 0; @@ -2476,7 +2478,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * therefore we must wait for any outstanding access to complete * before clearing the fence. */ - if (INTEL_INFO(dev)->gen < 4) { + reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + if (reg->gpu) { int ret; ret = i915_gem_object_flush_gpu_write_domain(obj, true); @@ -2486,6 +2489,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, ret = i915_gem_object_wait_rendering(obj, interruptible); if (ret) return ret; + + reg->gpu = false; } i915_gem_object_flush_gtt_write_domain(obj); @@ -3180,11 +3185,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, * properly handle blits to/from tiled surfaces. 
*/ if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj, false); + ret = i915_gem_object_get_fence_reg(obj, true); if (ret != 0) { i915_gem_object_unpin(obj); return ret; } + + dev_priv->fence_regs[obj_priv->fence_reg].gpu = true; } entry->offset = obj_priv->gtt_offset; From c78ec30bba52754b9f21a899eac2e2f5a7486116 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Sep 2010 12:50:23 +0100 Subject: [PATCH 192/476] drm/i915: Merge ring flushing and lazy requests Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 53 ++++++++++++---------------- drivers/gpu/drm/i915/intel_display.c | 2 +- 3 files changed, 24 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6e22be4f3585..37a44c80efd2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1003,6 +1003,7 @@ void i915_gem_reset_flushing_list(struct drm_device *dev); void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); void i915_gem_flush_ring(struct drm_device *dev, + struct drm_file *file_priv, struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a5d5751bad30..58baecc821a5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1910,16 +1910,23 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, void i915_gem_flush_ring(struct drm_device *dev, + struct drm_file *file_priv, struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) { ring->flush(dev, ring, invalidate_domains, flush_domains); i915_gem_process_flushing_list(dev, flush_domains, ring); + + if (ring->outstanding_lazy_request) { + (void)i915_add_request(dev, file_priv, NULL, ring); + ring->outstanding_lazy_request = false; + } } static void i915_gem_flush(struct drm_device *dev, + struct drm_file *file_priv, uint32_t invalidate_domains, uint32_t flush_domains, uint32_t flush_rings) @@ -1931,11 +1938,11 @@ i915_gem_flush(struct drm_device *dev, if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, + i915_gem_flush_ring(dev, file_priv, &dev_priv->render_ring, invalidate_domains, flush_domains); if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, + i915_gem_flush_ring(dev, file_priv, &dev_priv->bsd_ring, invalidate_domains, flush_domains); } @@ -2054,6 +2061,7 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; + u32 seqno; int ret; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && @@ -2064,24 +2072,18 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
*/ - i915_gem_flush_ring(dev, - &dev_priv->render_ring, + seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring); + i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - - ret = i915_wait_request(dev, - i915_gem_next_request_seqno(dev, &dev_priv->render_ring), - &dev_priv->render_ring); + ret = i915_wait_request(dev, seqno, &dev_priv->render_ring); if (ret) return ret; if (HAS_BSD(dev)) { - i915_gem_flush_ring(dev, - &dev_priv->bsd_ring, + seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring); + i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - - ret = i915_wait_request(dev, - i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring), - &dev_priv->bsd_ring); + ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring); if (ret) return ret; } @@ -2651,7 +2653,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; - i915_gem_flush_ring(dev, + i915_gem_flush_ring(dev, NULL, to_intel_bo(obj)->ring, 0, obj->write_domain); BUG_ON(obj->write_domain); @@ -2780,7 +2782,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, i915_gem_object_flush_cpu_write_domain(obj); old_read_domains = obj->read_domains; - obj->read_domains = I915_GEM_DOMAIN_GTT; + obj->read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -2837,7 +2839,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * need to be invalidated at next use. */ if (write) { - obj->read_domains &= I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; } @@ -3762,21 +3764,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); #endif - i915_gem_flush(dev, + i915_gem_flush(dev, file_priv, dev->invalidate_domains, dev->flush_domains, dev_priv->mm.flush_rings); } - if (dev_priv->render_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring); - dev_priv->render_ring.outstanding_lazy_request = false; - } - if (dev_priv->bsd_ring.outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring); - dev_priv->bsd_ring.outstanding_lazy_request = false; - } - for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -4232,12 +4225,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain & I915_GEM_GPU_DOMAINS) { - i915_gem_flush_ring(dev, + if (obj->write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(dev, file_priv, obj_priv->ring, 0, obj->write_domain); - (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring); - } /* Update the active list for the hardware's current position. 
* Otherwise this only updates on a delayed timer or when irqs diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0505ddb76a10..791374c888da 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5058,7 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Schedule the pipelined flush */ if (was_dirty) - i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty); + i915_gem_flush_ring(dev, NULL, obj_priv->ring, 0, was_dirty); if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; From 265db9585e570814d2f7aca109c5563bcde9c948 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Sep 2010 15:41:01 +0100 Subject: [PATCH 193/476] drm/i915: Drain any pending flips on the fb prior to unpinning If we have queued a page flip on the current fb and then request a mode change, wait until the page flip completes before performing the new request. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 ++ drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 41 +++++++++++++++------------- 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 37a44c80efd2..ce8ff8fdc55c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1024,6 +1024,9 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, bool interruptible, struct intel_ring_buffer *ring); +int i915_gem_wait_for_pending_flip(struct drm_device *dev, + struct drm_gem_object **object_list, + int count); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 58baecc821a5..a8ddcd499b3b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3497,7 +3497,7 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, return 0; } -static int +int i915_gem_wait_for_pending_flip(struct drm_device *dev, struct drm_gem_object **object_list, int count) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 791374c888da..461bf4879e0a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1564,11 +1564,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; int ret; /* no fb bound */ @@ -1577,38 +1572,46 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } - switch (plane) { + switch (intel_crtc->plane) { case 0: case 1: break; default: - DRM_ERROR("Can't update plane %d in SAREA\n", plane); return -EINVAL; } - intel_fb = to_intel_framebuffer(crtc->fb); - obj = intel_fb->obj; - obj_priv = to_intel_bo(obj); - mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, false); + ret = intel_pin_and_fence_fb_obj(dev, + to_intel_framebuffer(crtc->fb)->obj, + false); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; } + if (old_fb) { + struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if 
(atomic_read(&obj_priv->pending_flip)) { + ret = i915_gem_wait_for_pending_flip(dev, &obj, 1); + if (ret) { + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + } + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); if (ret) { - i915_gem_object_unpin(obj); + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); return ret; } - if (old_fb) { - intel_fb = to_intel_framebuffer(old_fb); - obj_priv = to_intel_bo(intel_fb->obj); - i915_gem_object_unpin(intel_fb->obj); - } + if (old_fb) + i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); mutex_unlock(&dev->struct_mutex); @@ -1619,7 +1622,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (!master_priv->sarea_priv) return 0; - if (pipe) { + if (intel_crtc->pipe) { master_priv->sarea_priv->pipeB_x = x; master_priv->sarea_priv->pipeB_y = y; } else { From f13d3f7311add99d1f874a6b67d56426afa35664 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Sep 2010 17:36:15 +0100 Subject: [PATCH 194/476] drm/i915: Track pinned objects Keep a list of pinned objects and display it via debugfs. Now all objects that exist in the GTT are always tracked on one of the active, flushing, inactive or pinned lists. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 16 ++++++++++++---- drivers/gpu/drm/i915/i915_drv.h | 6 ++++++ drivers/gpu/drm/i915/i915_gem.c | 18 ++++++++---------- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ac48115429ed..36f0e3630f74 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,10 +40,13 @@ #if defined(CONFIG_DEBUG_FS) -#define RENDER_LIST 1 -#define BSD_LIST 2 -#define FLUSHING_LIST 3 -#define INACTIVE_LIST 4 +enum { + RENDER_LIST, + BSD_LIST, + FLUSHING_LIST, + INACTIVE_LIST, + PINNED_LIST +}; static const char *yesno(int v) { @@ -150,6 +153,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; + case PINNED_LIST: + seq_printf(m, "Pinned:\n"); + head = &dev_priv->mm.pinned_list; + break; case FLUSHING_LIST: seq_printf(m, "Flushing:\n"); head = &dev_priv->mm.flushing_list; @@ -983,6 +990,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ce8ff8fdc55c..12e9f853a5e9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -569,6 +569,12 @@ typedef struct drm_i915_private { */ struct list_head inactive_list; + /** + * LRU list of objects which are not in the ringbuffer but + * are still pinned in the GTT. + */ + struct list_head pinned_list; + /** LRU list of objects with fence regs on them. 
*/ struct list_head fence_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a8ddcd499b3b..151fa43e4417 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1051,7 +1051,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } - /* Maintain LRU order of "inactive" objects */ if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); @@ -1552,7 +1551,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) i915_verify_inactive(dev, __FILE__, __LINE__); if (obj_priv->pin_count != 0) - list_del_init(&obj_priv->list); + list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); else list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); @@ -2044,9 +2043,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) obj_priv->gtt_space = NULL; } - /* Remove ourselves from the LRU list if present. */ - if (!list_empty(&obj_priv->list)) - list_del_init(&obj_priv->list); + list_del_init(&obj_priv->list); if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -4030,6 +4027,7 @@ int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; @@ -4065,9 +4063,9 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (obj_priv->pin_count == 1) { atomic_inc(&dev->pin_count); atomic_add(obj->size, &dev->pin_memory); - if (!obj_priv->active && - (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - list_del_init(&obj_priv->list); + if (!obj_priv->active) + list_move_tail(&obj_priv->list, + &dev_priv->mm.pinned_list); } i915_verify_inactive(dev, __FILE__, __LINE__); @@ -4091,8 +4089,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) * the inactive list */ if (obj_priv->pin_count == 0) { - if (!obj_priv->active && - (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) + if (!obj_priv->active) list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); atomic_dec(&dev->pin_count); @@ -4614,6 +4611,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->render_ring.active_list); From 0e87d2b06cb4651c874d0b208d31c73addbd638b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 7 Sep 2010 22:11:15 +0200 Subject: [PATCH 195/476] intel-gtt: initialize our own scratch page The intel gtt fake agp driver is the only agp driver to use dma address remapping. So it makes sense to fold this code back into the only user (and thus reduce the reliance on the agp code). This patch does the first step by initializing (and remapping) the scratch page in a new function intel_gtt_setup_scratch_page. Unfortunately intel_gtt_cleanup had to move to avoid a forward declaration. The new scratch page is not yet used, though. v2: Refactor out scratch page teardown. Suggested by Chris Wilson on irc. This makes it clear what's going on and results in a nice symmetry between setup and teardown. 
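The reason the scratch page needs the PCI DMA API at all: with an IOMMU active (CONFIG_DMAR, which is what turns on USE_PCI_DMA_API), the page's physical address is not necessarily a valid bus address for the GPU, so the page has to be mapped before its address can be written into GTT entries; without remapping, page_to_phys() is good enough. A condensed sketch of that decision, built only from calls that appear in the patch below; the helper name and its use of 0 as a failure value are invented for illustration:

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/io.h>

/* Illustrative helper (not part of the patch): pick the bus address that
 * unused GTT entries should point at for a given scratch page. */
static dma_addr_t scratch_page_bus_addr(struct pci_dev *pcidev,
					struct page *page, bool use_dma_api)
{
	if (use_dma_api) {
		dma_addr_t dma = pci_map_page(pcidev, page, 0, PAGE_SIZE,
					      PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pcidev, dma))
			return 0;	/* 0 signals failure in this sketch */
		return dma;
	}

	/* No remapping in play: the physical address is the bus address. */
	return page_to_phys(page);
}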
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 81 ++++++++++++++++++++++++++++-------- 1 file changed, 64 insertions(+), 17 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 248ac5f8708e..e386a44330b8 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -35,6 +35,8 @@ */ #ifdef CONFIG_DMAR #define USE_PCI_DMA_API 1 +#else +#define USE_PCI_DMA_API 0 #endif /* Max amount of stolen space, anything above will be returned to Linux */ @@ -108,6 +110,8 @@ static struct _intel_private { struct page *i8xx_page; struct resource ifp_resource; int resource_valid; + struct page *scratch_page; + dma_addr_t scratch_page_dma; } intel_private; #define INTEL_GTT_GEN intel_private.driver->gen @@ -115,7 +119,7 @@ static struct _intel_private { #define IS_PINEVIEW intel_private.driver->is_pineview #define IS_IRONLAKE intel_private.driver->is_ironlake -#ifdef USE_PCI_DMA_API +#if USE_PCI_DMA_API static int intel_agp_map_page(struct page *page, dma_addr_t *ret) { *ret = pci_map_page(intel_private.pcidev, page, 0, @@ -540,6 +544,32 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } +static int intel_gtt_setup_scratch_page(void) +{ + struct page *page; + dma_addr_t dma_addr; + + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (page == NULL) + return -ENOMEM; + get_page(page); + set_pages_uc(page, 1); + + if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { + dma_addr = pci_map_page(intel_private.pcidev, page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + return -EINVAL; + + intel_private.scratch_page_dma = dma_addr; + } else + intel_private.scratch_page_dma = page_to_phys(page); + + intel_private.scratch_page = page; + + return 0; +} + static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { {128, 32768, 5}, /* The 64M mode still requires a 128k gatt */ @@ -794,6 +824,29 @@ static unsigned int intel_gtt_mappable_entries(void) return aperture_size >> PAGE_SHIFT; } +static void intel_gtt_teardown_scratch_page(void) +{ + set_pages_wb(intel_private.scratch_page, 1); + pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + put_page(intel_private.scratch_page); + __free_page(intel_private.scratch_page); +} + +static void intel_gtt_cleanup(void) +{ + if (intel_private.i9xx_flush_page) + iounmap(intel_private.i9xx_flush_page); + if (intel_private.resource_valid) + release_resource(&intel_private.ifp_resource); + intel_private.ifp_resource.start = 0; + intel_private.resource_valid = 0; + iounmap(intel_private.gtt); + iounmap(intel_private.registers); + + intel_gtt_teardown_scratch_page(); +} + static int intel_gtt_init(void) { u32 gtt_map_size; @@ -825,6 +878,12 @@ static int intel_gtt_init(void) return -ENOMEM; } + ret = intel_gtt_setup_scratch_page(); + if (ret != 0) { + intel_gtt_cleanup(); + return ret; + } + return 0; } @@ -1174,18 +1233,6 @@ static int intel_i9xx_configure(void) return 0; } -static void intel_gtt_cleanup(void) -{ - if (intel_private.i9xx_flush_page) - iounmap(intel_private.i9xx_flush_page); - if (intel_private.resource_valid) - release_resource(&intel_private.ifp_resource); - intel_private.ifp_resource.start = 0; - intel_private.resource_valid = 0; - iounmap(intel_private.gtt); - iounmap(intel_private.registers); -} - static void intel_i915_chipset_flush(struct agp_bridge_data 
*bridge) { if (intel_private.i9xx_flush_page) @@ -1416,7 +1463,7 @@ static const struct agp_bridge_driver intel_915_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API +#if USE_PCI_DMA_API .agp_map_page = intel_agp_map_page, .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, @@ -1449,7 +1496,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API +#if USE_PCI_DMA_API .agp_map_page = intel_agp_map_page, .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, @@ -1482,7 +1529,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_gen6_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API +#if USE_PCI_DMA_API .agp_map_page = intel_agp_map_page, .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, @@ -1515,7 +1562,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API +#if USE_PCI_DMA_API .agp_map_page = intel_agp_map_page, .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, From 351bb278d2fd2df93526c15f37500070347328b4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 7 Sep 2010 22:41:04 +0200 Subject: [PATCH 196/476] intel-gtt: introduce pte write function for i8xx/i915/i945 And put it to use in the gtt configuration code that writes the scratch page addr in all gtt ptes. This makes intel_i830_configure generic, hence rename it to intel_fake_agp_configure. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 41 ++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index e386a44330b8..4d768e085382 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -90,6 +90,10 @@ struct intel_gtt_driver { unsigned int is_ironlake : 1; /* Chipset specific GTT setup */ int (*setup)(void); + void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); + /* Flags is a more or less chipset specific opaque value. + * For chipsets that need to support old ums (non-gem) code, this + * needs to be identical to the various supported agp memory types! 
*/ }; static struct _intel_private { @@ -954,6 +958,23 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) printk(KERN_ERR "Timed out waiting for cache flush.\n"); } +static void i830_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + u32 pte_flags = I810_PTE_VALID; + + switch (flags) { + case AGP_DCACHE_MEMORY: + pte_flags |= I810_PTE_LOCAL; + break; + case AGP_USER_CACHED_MEMORY: + pte_flags |= I830_PTE_SYSTEM_CACHED; + break; + } + + writel(addr | pte_flags, intel_private.gtt + entry); +} + static void intel_enable_gtt(void) { u32 gma_addr; @@ -1011,7 +1032,7 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) return 0; } -static int intel_i830_configure(void) +static int intel_fake_agp_configure(void) { int i; @@ -1019,13 +1040,12 @@ static int intel_i830_configure(void) agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; - if (agp_bridge->driver->needs_scratch_page) { - for (i = intel_private.base.gtt_stolen_entries; - i < intel_private.base.gtt_total_entries; i++) { - writel(agp_bridge->scratch_page, intel_private.gtt+i); - } - readl(intel_private.gtt+i-1); /* PCI Posting. */ + for (i = intel_private.base.gtt_stolen_entries; + i < intel_private.base.gtt_total_entries; i++) { + intel_private.driver->write_entry(intel_private.scratch_page_dma, + i, 0); } + readl(intel_private.gtt+i-1); /* PCI Posting. */ global_cache_flush(); @@ -1417,7 +1437,7 @@ static const struct agp_bridge_driver intel_830_driver = { .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, - .configure = intel_i830_configure, + .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .mask_memory = intel_i810_mask_memory, @@ -1444,7 +1464,7 @@ static const struct agp_bridge_driver intel_915_driver = { .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, - .configure = intel_i9xx_configure, + .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .mask_memory = intel_i810_mask_memory, @@ -1573,10 +1593,13 @@ static const struct agp_bridge_driver intel_g33_driver = { static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .setup = i830_setup, + .write_entry = i830_write_entry, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, .setup = i9xx_setup, + /* i945 is the last gpu to need phys mem (for overlay and cursors). */ + .write_entry = i830_write_entry, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, From a6963596a13e62f8e65b1cf3403a330ff2db407c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 11 Sep 2010 14:01:43 +0200 Subject: [PATCH 197/476] intel-gtt: introduce pte write function for g33/i965/gm45 Like for the i830. 
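The point of the "shift high bits down" in the i965 entry writer added below is that the GTT entry is still a single 32-bit word: the page address occupies bits 31:12, physical address bits 35:32 get folded into bits 7:4 (which are free because page addresses are 4 KiB aligned), and bit 0 is the valid bit. A standalone worked example of that packing; PTE_VALID here stands in for I810_PTE_VALID (bit 0):

#include <stdio.h>
#include <stdint.h>

#define PTE_VALID 0x1u	/* stands in for I810_PTE_VALID */

/* Pack a page-aligned physical address of up to 36 bits into a gen4-style
 * 32-bit GTT entry: bits 31:12 stay put, bits 35:32 move down to bits 7:4. */
static uint32_t gen4_pte_encode(uint64_t addr)
{
	addr |= (addr >> 28) & 0xf0;	/* same trick as i965_write_entry() */
	return (uint32_t)(addr | PTE_VALID);
}

int main(void)
{
	uint64_t addr = 0x83f45d000ull;	/* page-aligned address above 4 GiB */

	/* Prints "phys 0x83f45d000 -> pte 0x3f45d081": the leading 0x8 from
	 * bits 35:32 reappears as the 0x80 in the low byte of the entry. */
	printf("phys %#llx -> pte %#x\n",
	       (unsigned long long)addr, gen4_pte_encode(addr));
	return 0;
}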
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 4d768e085382..9d25ebd50d89 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1334,6 +1334,14 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; } +static void i965_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + /* Shift high bits down */ + addr |= (addr >> 28) & 0xf0; + writel(addr | I810_PTE_VALID, intel_private.gtt + entry); +} + static int i9xx_setup(void) { u32 reg_addr; @@ -1497,7 +1505,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, - .configure = intel_i9xx_configure, + .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .mask_memory = intel_i965_mask_memory, @@ -1563,7 +1571,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, - .configure = intel_i9xx_configure, + .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .mask_memory = intel_i965_mask_memory, @@ -1605,24 +1613,29 @@ static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, .is_g33 = 1, .setup = i9xx_setup, + .write_entry = i965_write_entry, }; static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, + .write_entry = i965_write_entry, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .setup = i9xx_setup, + .write_entry = i965_write_entry, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, + .write_entry = i965_write_entry, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, .setup = i9xx_setup, + .write_entry = i965_write_entry, }; static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, From 97ef1bdd0bc75bce7b2058e9c432b6c277dcf4d3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 9 Sep 2010 17:52:20 +0200 Subject: [PATCH 198/476] intel-gtt: introduce pte write function for gen6 Like for i830. intel_i9xx_configure is now unused, so kill it. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 48 +++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9d25ebd50d89..1de45f96db9c 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1232,27 +1232,6 @@ static void intel_i9xx_setup_flush(void) "can't ioremap flush page - no chipset flushing\n"); } -static int intel_i9xx_configure(void) -{ - int i; - - intel_enable_gtt(); - - agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; - - if (agp_bridge->driver->needs_scratch_page) { - for (i = intel_private.base.gtt_stolen_entries; i < - intel_private.base.gtt_total_entries; i++) { - writel(agp_bridge->scratch_page, intel_private.gtt+i); - } - readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ - } - - global_cache_flush(); - - return 0; -} - static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) { if (intel_private.i9xx_flush_page) @@ -1342,6 +1321,30 @@ static void i965_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | I810_PTE_VALID, intel_private.gtt + entry); } +static void gen6_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_UNCACHED_MEMORY) + pte_flags = GEN6_PTE_UNCACHED; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC_MLC; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + static int i9xx_setup(void) { u32 reg_addr; @@ -1538,7 +1541,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .needs_scratch_page = true, - .configure = intel_i9xx_configure, + .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .mask_memory = intel_gen6_mask_memory, @@ -1640,6 +1643,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, + .write_entry = gen6_write_entry, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of From d0b6dc4b930e3be9c02cc9638f02e14d271d5f0d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 9 Sep 2010 18:11:41 +0200 Subject: [PATCH 199/476] intel-gtt: drop agp scratch page support stuff intel-gtt.c now handles the scratch page itself, so drop all that was just there to support it. 
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1de45f96db9c..64a62d9afb75 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -124,21 +124,6 @@ static struct _intel_private { #define IS_IRONLAKE intel_private.driver->is_ironlake #if USE_PCI_DMA_API -static int intel_agp_map_page(struct page *page, dma_addr_t *ret) -{ - *ret = pci_map_page(intel_private.pcidev, page, 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(intel_private.pcidev, *ret)) - return -EINVAL; - return 0; -} - -static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) -{ - pci_unmap_page(intel_private.pcidev, dma, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); -} - static void intel_agp_free_sglist(struct agp_memory *mem) { struct sg_table st; @@ -1447,7 +1432,6 @@ static const struct agp_bridge_driver intel_830_driver = { .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .needs_scratch_page = true, .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, @@ -1474,7 +1458,6 @@ static const struct agp_bridge_driver intel_915_driver = { .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .needs_scratch_page = true, .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, @@ -1495,8 +1478,6 @@ static const struct agp_bridge_driver intel_915_driver = { .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, #if USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, .agp_unmap_memory = intel_agp_unmap_memory, #endif @@ -1507,7 +1488,6 @@ static const struct agp_bridge_driver intel_i965_driver = { .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .needs_scratch_page = true, .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, @@ -1528,8 +1508,6 @@ static const struct agp_bridge_driver intel_i965_driver = { .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, #if USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, .agp_unmap_memory = intel_agp_unmap_memory, #endif @@ -1540,7 +1518,6 @@ static const struct agp_bridge_driver intel_gen6_driver = { .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .needs_scratch_page = true, .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, @@ -1561,8 +1538,6 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_type_to_mask_type = intel_gen6_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, #if USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, .agp_unmap_memory = intel_agp_unmap_memory, #endif @@ -1573,7 +1548,6 @@ static const struct agp_bridge_driver intel_g33_driver = { .size_type = 
FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .needs_scratch_page = true, .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, @@ -1594,8 +1568,6 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, #if USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, .agp_map_memory = intel_agp_map_memory, .agp_unmap_memory = intel_agp_unmap_memory, #endif From a87aa5cc0074fea871c8c6d2660d9b6cd7699d3d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 9 Sep 2010 18:17:34 +0200 Subject: [PATCH 200/476] agp: kill agp_(map|unmap)_page Only used to remap the scratch page. Now that intel-gtt does this itself, kill the support code. Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/agp.h | 2 -- drivers/char/agp/backend.c | 22 +--------------------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 120490949997..04ad0bbfaf41 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -122,8 +122,6 @@ struct agp_bridge_driver { int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); void (*chipset_flush)(struct agp_bridge_data *); - int (*agp_map_page)(struct page *page, dma_addr_t *ret); - void (*agp_unmap_page)(struct page *page, dma_addr_t dma); int (*agp_map_memory)(struct agp_memory *mem); void (*agp_unmap_memory)(struct agp_memory *mem); }; diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index ee4f855611b6..f27d0d0816d3 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) } bridge->scratch_page_page = page; - if (bridge->driver->agp_map_page) { - if (bridge->driver->agp_map_page(page, - &bridge->scratch_page_dma)) { - dev_err(&bridge->dev->dev, - "unable to dma-map scratch page\n"); - rc = -ENOMEM; - goto err_out_nounmap; - } - } else { - bridge->scratch_page_dma = page_to_phys(page); - } + bridge->scratch_page_dma = page_to_phys(page); bridge->scratch_page = bridge->driver->mask_memory(bridge, bridge->scratch_page_dma, 0); @@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) return 0; err_out: - if (bridge->driver->needs_scratch_page && - bridge->driver->agp_unmap_page) { - bridge->driver->agp_unmap_page(bridge->scratch_page_page, - bridge->scratch_page_dma); - } -err_out_nounmap: if (bridge->driver->needs_scratch_page) { void *va = page_address(bridge->scratch_page_page); @@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge) bridge->driver->needs_scratch_page) { void *va = page_address(bridge->scratch_page_page); - if (bridge->driver->agp_unmap_page) - bridge->driver->agp_unmap_page(bridge->scratch_page_page, - bridge->scratch_page_dma); - bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE); } From 5cbecafce4ee8ab73c194911e01a77a7a07f034e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 11 Sep 2010 21:31:04 +0200 Subject: [PATCH 201/476] intel-gtt: generic (insert|remove)_entries for i830 Well, not all too generic because it does not yet support dmar. Add a new function check_flags to ensure that non-gem code does not try to screw us over. 
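The hook exists because the legacy (non-gem) ioctls pass agp memory types straight through as the GTT flags value, so each chipset driver must be able to refuse types it cannot encode before anything is written into the GTT. In the generic insert path this becomes a single guard; a sketch of the call site this patch adds:

	if (type != mem->type)
		goto out_err;

	/* let the chipset driver veto agp types it cannot express as PTE flags */
	if (!intel_private.driver->check_flags(type))
		goto out_err;
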
v2: Beautify i830_check_flags with an idea from Chris Wilson. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 53 +++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 64a62d9afb75..c1b766dbef4d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -94,6 +94,7 @@ struct intel_gtt_driver { /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! */ + bool (*check_flags)(unsigned int flags); }; static struct _intel_private { @@ -1037,20 +1038,28 @@ static int intel_fake_agp_configure(void) return 0; } -static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, - int type) +static bool i830_check_flags(unsigned int flags) { - int i, j, num_entries; - void *temp; + switch (flags) { + case 0: + case AGP_PHYS_MEMORY: + case AGP_USER_CACHED_MEMORY: + case AGP_USER_MEMORY: + return true; + } + + return false; +} + +static int intel_fake_agp_insert_entries(struct agp_memory *mem, + off_t pg_start, int type) +{ + int i, j; int ret = -EINVAL; - int mask_type; if (mem->page_count == 0) goto out; - temp = agp_bridge->current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - if (pg_start < intel_private.base.gtt_stolen_entries) { dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", @@ -1061,29 +1070,21 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, goto out_err; } - if ((pg_start + mem->page_count) > num_entries) + if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries) goto out_err; - /* The i830 can't check the GTT for entries since its read only, - * depend on the caller to make the correct offset decisions. 
- */ - if (type != mem->type) goto out_err; - mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - - if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && - mask_type != INTEL_AGP_CACHED_MEMORY) + if (!intel_private.driver->check_flags(type)) goto out_err; if (!mem->is_flushed) global_cache_flush(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - writel(agp_bridge->driver->mask_memory(agp_bridge, - page_to_phys(mem->pages[i]), mask_type), - intel_private.gtt+j); + intel_private.driver->write_entry(page_to_phys(mem->pages[i]), + j, type); } readl(intel_private.gtt+j-1); @@ -1094,8 +1095,8 @@ out_err: return ret; } -static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, - int type) +static int intel_fake_agp_remove_entries(struct agp_memory *mem, + off_t pg_start, int type) { int i; @@ -1109,7 +1110,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, } for (i = pg_start; i < (mem->page_count + pg_start); i++) { - writel(agp_bridge->scratch_page, intel_private.gtt+i); + intel_private.driver->write_entry(intel_private.scratch_page_dma, + i, 0); } readl(intel_private.gtt+i-1); @@ -1441,8 +1443,8 @@ static const struct agp_bridge_driver intel_830_driver = { .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i830_insert_entries, - .remove_memory = intel_i830_remove_entries, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -1577,6 +1579,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .setup = i830_setup, .write_entry = i830_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, From fefaa70f0c7fa406492039e35b69b83fc13e163a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 11 Sep 2010 22:12:11 +0200 Subject: [PATCH 202/476] intel-gtt: generic (insert|remove)_entries for i915 Beef up the generic version to support dmar. Otherwise like for the i830. v2: Don't try to DMA remap on resume for already remapped pages. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 60 +++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 11 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c1b766dbef4d..f05c3648017d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -124,7 +124,6 @@ static struct _intel_private { #define IS_PINEVIEW intel_private.driver->is_pineview #define IS_IRONLAKE intel_private.driver->is_ironlake -#if USE_PCI_DMA_API static void intel_agp_free_sglist(struct agp_memory *mem) { struct sg_table st; @@ -144,6 +143,9 @@ static int intel_agp_map_memory(struct agp_memory *mem) struct scatterlist *sg; int i; + if (mem->sg_list) + return 0; /* already mapped (for e.g. 
resume */ + DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) @@ -175,6 +177,7 @@ static void intel_agp_unmap_memory(struct agp_memory *mem) intel_agp_free_sglist(mem); } +#if USE_PCI_DMA_API static void intel_agp_insert_sg_entries(struct agp_memory *mem, off_t pg_start, int mask_type) { @@ -1051,6 +1054,31 @@ static bool i830_check_flags(unsigned int flags) return false; } +static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, + unsigned int sg_len, + unsigned int pg_start, + unsigned int flags) +{ + struct scatterlist *sg; + unsigned int len, m; + int i, j; + + j = pg_start; + + /* sg may merge pages, but we have to separate + * per-page addr for GTT */ + for_each_sg(sg_list, sg, sg_len, i) { + len = sg_dma_len(sg) >> PAGE_SHIFT; + for (m = 0; m < len; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + intel_private.driver->write_entry(addr, + j, flags); + j++; + } + } + readl(intel_private.gtt+j-1); +} + static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -1082,11 +1110,21 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, if (!mem->is_flushed) global_cache_flush(); - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - intel_private.driver->write_entry(page_to_phys(mem->pages[i]), - j, type); + if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { + ret = intel_agp_map_memory(mem); + if (ret != 0) + return ret; + + intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, + pg_start, type); + } else { + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + dma_addr_t addr = page_to_phys(mem->pages[i]); + intel_private.driver->write_entry(addr, + j, type); + } + readl(intel_private.gtt+j-1); } - readl(intel_private.gtt+j-1); out: ret = 0; @@ -1109,6 +1147,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, return -EINVAL; } + if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) + intel_agp_unmap_memory(mem); + for (i = pg_start; i < (mem->page_count + pg_start); i++) { intel_private.driver->write_entry(intel_private.scratch_page_dma, i, 0); @@ -1469,8 +1510,8 @@ static const struct agp_bridge_driver intel_915_driver = { .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -1479,10 +1520,6 @@ static const struct agp_bridge_driver intel_915_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#if USE_PCI_DMA_API - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif }; static const struct agp_bridge_driver intel_i965_driver = { @@ -1586,6 +1623,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .setup = i9xx_setup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ .write_entry = i830_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, From 450f2b3d51025a1749b694ee13f0e4e23ed58750 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 11 Sep 2010 23:48:25 +0200 Subject: [PATCH 203/476] intel-gtt: generic (insert|remove)_entries for g33/i965 Like for the i915. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index f05c3648017d..dc06b23c1431 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1536,8 +1536,8 @@ static const struct agp_bridge_driver intel_i965_driver = { .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -1546,10 +1546,6 @@ static const struct agp_bridge_driver intel_i965_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#if USE_PCI_DMA_API - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif }; static const struct agp_bridge_driver intel_gen6_driver = { @@ -1596,8 +1592,8 @@ static const struct agp_bridge_driver intel_g33_driver = { .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -1606,10 +1602,6 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#if USE_PCI_DMA_API - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif }; static const struct intel_gtt_driver i8xx_gtt_driver = { @@ -1630,28 +1622,33 @@ static const struct intel_gtt_driver g33_gtt_driver = { .is_g33 = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .setup = i9xx_setup, .write_entry = i965_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, .write_entry = i965_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .check_flags = i830_check_flags, }; static const struct intel_gtt_driver 
sandybridge_gtt_driver = { .gen = 6, From 90cb149e1a85f8296daa1989c055db18fbf4ea88 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 11 Sep 2010 23:55:20 +0200 Subject: [PATCH 204/476] intel-gtt: generic (insert|remove)_entries for sandybridge Like before, but now with the added bonus of being able to kill quite a bit of no-longer userful code (the old dmar support stuff). Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 144 ++--------------------------------- 1 file changed, 8 insertions(+), 136 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index dc06b23c1431..44722c6790b2 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -177,61 +177,6 @@ static void intel_agp_unmap_memory(struct agp_memory *mem) intel_agp_free_sglist(mem); } -#if USE_PCI_DMA_API -static void intel_agp_insert_sg_entries(struct agp_memory *mem, - off_t pg_start, int mask_type) -{ - struct scatterlist *sg; - int i, j; - - j = pg_start; - - WARN_ON(!mem->num_sg); - - if (mem->num_sg == mem->page_count) { - for_each_sg(mem->sg_list, sg, mem->page_count, i) { - writel(agp_bridge->driver->mask_memory(agp_bridge, - sg_dma_address(sg), mask_type), - intel_private.gtt+j); - j++; - } - } else { - /* sg may merge pages, but we have to separate - * per-page addr for GTT */ - unsigned int len, m; - - for_each_sg(mem->sg_list, sg, mem->num_sg, i) { - len = sg_dma_len(sg) / PAGE_SIZE; - for (m = 0; m < len; m++) { - writel(agp_bridge->driver->mask_memory(agp_bridge, - sg_dma_address(sg) + m * PAGE_SIZE, - mask_type), - intel_private.gtt+j); - j++; - } - } - } - readl(intel_private.gtt+j-1); -} - -#else - -static void intel_agp_insert_sg_entries(struct agp_memory *mem, - off_t pg_start, int mask_type) -{ - int i, j; - - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - writel(agp_bridge->driver->mask_memory(agp_bridge, - page_to_phys(mem->pages[i]), mask_type), - intel_private.gtt+j); - } - - readl(intel_private.gtt+j-1); -} - -#endif - static int intel_i810_fetch_size(void) { u32 smram_miscc; @@ -1266,81 +1211,6 @@ static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) writel(1, intel_private.i9xx_flush_page); } -static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, - int type) -{ - int num_entries; - void *temp; - int ret = -EINVAL; - int mask_type; - - if (mem->page_count == 0) - goto out; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - - if (pg_start < intel_private.base.gtt_stolen_entries) { - dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", - pg_start, intel_private.base.gtt_stolen_entries); - - dev_info(&intel_private.pcidev->dev, - "trying to insert into local/stolen memory\n"); - goto out_err; - } - - if ((pg_start + mem->page_count) > num_entries) - goto out_err; - - /* The i915 can't check the GTT for entries since it's read only; - * depend on the caller to make the correct offset decisions. 
- */ - - if (type != mem->type) - goto out_err; - - mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - - if (INTEL_GTT_GEN != 6 && mask_type != 0 && - mask_type != AGP_PHYS_MEMORY && - mask_type != INTEL_AGP_CACHED_MEMORY) - goto out_err; - - if (!mem->is_flushed) - global_cache_flush(); - - intel_agp_insert_sg_entries(mem, pg_start, mask_type); - - out: - ret = 0; - out_err: - mem->is_flushed = true; - return ret; -} - -static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, - int type) -{ - int i; - - if (mem->page_count == 0) - return 0; - - if (pg_start < intel_private.base.gtt_stolen_entries) { - dev_info(&intel_private.pcidev->dev, - "trying to disable local/stolen memory\n"); - return -EINVAL; - } - - for (i = pg_start; i < (mem->page_count + pg_start); i++) - writel(agp_bridge->scratch_page, intel_private.gtt+i); - - readl(intel_private.gtt+i-1); - - return 0; -} - static void i965_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { @@ -1349,6 +1219,11 @@ static void i965_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | I810_PTE_VALID, intel_private.gtt + entry); } +static bool gen6_check_flags(unsigned int flags) +{ + return true; +} + static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { @@ -1562,8 +1437,8 @@ static const struct agp_bridge_driver intel_gen6_driver = { .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -1572,10 +1447,6 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = intel_gen6_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, -#if USE_PCI_DMA_API - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif }; static const struct agp_bridge_driver intel_g33_driver = { @@ -1654,6 +1525,7 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, .write_entry = gen6_write_entry, + .check_flags = gen6_check_flags, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of From bdd30729b68d708c970125aab363931134698f2d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 12:34:44 +0200 Subject: [PATCH 205/476] intel-gtt: kill mask_memory functions That indirection mess can now go. Add a dummy i81x gtt_driver to avoid a NULL pointer check. 
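What the removed mask_memory indirection actually computed was only the folding of the high physical-address bits into the low bits of the 32-bit GTT entry; that arithmetic now sits directly in the per-chipset write_entry hooks. A sketch of the two encodings, restating the comment deleted below, given a dma_addr_t addr and the chipset's pte_flags:

	u32 pte;

	/* i965-class: 36-bit addresses, bits 35:32 fold into PTE bits 7:4;
	 * (addr >> 28) & 0xf0 is exactly those four bits shifted into place */
	pte = lower_32_bits(addr) | ((addr >> 28) & 0xf0) | I810_PTE_VALID;

	/* gen6: 40-bit addresses, bits 39:32 fold into PTE bits 11:4, with the
	 * GEN6_PTE_* cacheability/GFDT bits supplying the rest of the flags */
	pte = lower_32_bits(addr) | ((addr >> 28) & 0xff0) | pte_flags;

	writel(pte, intel_private.gtt + entry);
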
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 105 +++++------------------------------ 1 file changed, 13 insertions(+), 92 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 44722c6790b2..bb222d5f322b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -69,20 +69,6 @@ static struct gatt_mask intel_i810_masks[] = #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4 -static struct gatt_mask intel_gen6_masks[] = -{ - {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED, - .type = INTEL_AGP_UNCACHED_MEMORY }, - {.mask = I810_PTE_VALID | GEN6_PTE_LLC, - .type = INTEL_AGP_CACHED_MEMORY_LLC }, - {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT, - .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT }, - {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC, - .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC }, - {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT, - .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT }, -}; - struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; @@ -287,34 +273,6 @@ static void i8xx_destroy_pages(struct page *page) atomic_dec(&agp_bridge->current_memory_agp); } -static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, - int type) -{ - if (type < AGP_USER_TYPES) - return type; - else if (type == AGP_USER_CACHED_MEMORY) - return INTEL_AGP_CACHED_MEMORY; - else - return 0; -} - -static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge, - int type) -{ - unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT; - unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT; - - if (type_mask == AGP_USER_UNCACHED_MEMORY) - return INTEL_AGP_UNCACHED_MEMORY; - else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) - return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT : - INTEL_AGP_CACHED_MEMORY_LLC_MLC; - else /* set 'normal'/'cached' to LLC by default */ - return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT : - INTEL_AGP_CACHED_MEMORY_LLC; -} - - static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -1290,35 +1248,6 @@ static int i9xx_setup(void) return 0; } -/* - * The i965 supports 36-bit physical addresses, but to keep - * the format of the GTT the same, the bits that don't fit - * in a 32-bit word are shifted down to bits 4..7. - * - * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" - * is always zero on 32-bit architectures, so no need to make - * this conditional. 
- */ -static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, - dma_addr_t addr, int type) -{ - /* Shift high bits down */ - addr |= (addr >> 28) & 0xf0; - - /* Type checking must be done elsewhere */ - return addr | bridge->driver->masks[type].mask; -} - -static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, - dma_addr_t addr, int type) -{ - /* gen6 has bit11-4 for physical addr bit39-32 */ - addr |= (addr >> 28) & 0xff0; - - /* Type checking must be done elsewhere */ - return addr | bridge->driver->masks[type].mask; -} - static const struct agp_bridge_driver intel_810_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_i810_sizes, @@ -1353,8 +1282,6 @@ static const struct agp_bridge_driver intel_830_driver = { .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, - .mask_memory = intel_i810_mask_memory, - .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, @@ -1367,7 +1294,6 @@ static const struct agp_bridge_driver intel_830_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i830_chipset_flush, }; @@ -1379,8 +1305,6 @@ static const struct agp_bridge_driver intel_915_driver = { .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, - .mask_memory = intel_i810_mask_memory, - .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, @@ -1393,7 +1317,6 @@ static const struct agp_bridge_driver intel_915_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, }; @@ -1405,8 +1328,6 @@ static const struct agp_bridge_driver intel_i965_driver = { .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, - .mask_memory = intel_i965_mask_memory, - .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, @@ -1419,7 +1340,6 @@ static const struct agp_bridge_driver intel_i965_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, }; @@ -1431,8 +1351,6 @@ static const struct agp_bridge_driver intel_gen6_driver = { .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, - .mask_memory = intel_gen6_mask_memory, - .masks = intel_gen6_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, @@ -1445,7 +1363,6 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_gen6_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, }; @@ -1457,8 +1374,6 @@ 
static const struct agp_bridge_driver intel_g33_driver = { .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, - .mask_memory = intel_i965_mask_memory, - .masks = intel_i810_masks, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, @@ -1471,10 +1386,12 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, }; +static const struct intel_gtt_driver i81x_gtt_driver = { + .gen = 1, +}; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .setup = i830_setup, @@ -1538,10 +1455,14 @@ static const struct intel_gtt_driver_description { const struct agp_bridge_driver *gmch_driver; const struct intel_gtt_driver *gtt_driver; } intel_gtt_chipsets[] = { - { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver , NULL}, - { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver , NULL}, - { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver , NULL}, - { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver , NULL}, + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, + &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver , &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", @@ -1664,9 +1585,9 @@ int intel_gmch_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); - if (bridge->driver->mask_memory == intel_gen6_mask_memory) + if (intel_private.driver->write_entry == gen6_write_entry) mask = 40; - else if (bridge->driver->mask_memory == intel_i965_mask_memory) + else if (intel_private.driver->write_entry == i965_write_entry) mask = 36; else mask = 32; From 1b263f246639c4777fbf6cfda932ecd1ea4bebb9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 00:27:24 +0200 Subject: [PATCH 206/476] intel-gtt: move chipset flush to the gtt driver struct This is the last differentiator between the different fake agp drivers. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index bb222d5f322b..abd422c806c5 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -81,6 +81,7 @@ struct intel_gtt_driver { * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! */ bool (*check_flags)(unsigned int flags); + void (*chipset_flush)(void); }; static struct _intel_private { @@ -838,7 +839,7 @@ static void intel_i830_setup_flush(void) * that buffer out, we just fill 1KB and clflush it out, on the assumption * that it'll push whatever was in there out. It appears to work. 
*/ -static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) +static void i830_chipset_flush(void) { unsigned int *pg = intel_private.i8xx_flush_page; @@ -1062,6 +1063,11 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, return 0; } +static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge) +{ + intel_private.driver->chipset_flush(); +} + static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, int type) { @@ -1163,7 +1169,7 @@ static void intel_i9xx_setup_flush(void) "can't ioremap flush page - no chipset flushing\n"); } -static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) +static void i9xx_chipset_flush(void) { if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); @@ -1294,7 +1300,7 @@ static const struct agp_bridge_driver intel_830_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_i830_chipset_flush, + .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct agp_bridge_driver intel_915_driver = { @@ -1317,7 +1323,7 @@ static const struct agp_bridge_driver intel_915_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_i915_chipset_flush, + .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct agp_bridge_driver intel_i965_driver = { @@ -1340,7 +1346,7 @@ static const struct agp_bridge_driver intel_i965_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_i915_chipset_flush, + .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct agp_bridge_driver intel_gen6_driver = { @@ -1363,7 +1369,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_i915_chipset_flush, + .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct agp_bridge_driver intel_g33_driver = { @@ -1386,7 +1392,7 @@ static const struct agp_bridge_driver intel_g33_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_i915_chipset_flush, + .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct intel_gtt_driver i81x_gtt_driver = { @@ -1397,6 +1403,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { .setup = i830_setup, .write_entry = i830_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i830_chipset_flush, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, @@ -1404,6 +1411,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ .write_entry = i830_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, @@ -1411,6 +1419,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .setup = i9xx_setup, .write_entry = i965_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, @@ -1418,18 +1427,21 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .setup = i9xx_setup, .write_entry = i965_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .setup = i9xx_setup, .write_entry = i965_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, .write_entry = i965_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, @@ -1437,12 +1449,14 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .setup = i9xx_setup, .write_entry = i965_write_entry, .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, .write_entry = gen6_write_entry, .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of From e9b1cc81c2222108d866323c51f482dd6db8d689 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 00:29:26 +0200 Subject: [PATCH 207/476] intel-gtt: consolidate fake_agp driver structs They're now all the same. 
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 170 ++++++++--------------------------- 1 file changed, 39 insertions(+), 131 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index abd422c806c5..57dc50488d18 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1280,99 +1280,7 @@ static const struct agp_bridge_driver intel_810_driver = { .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; -static const struct agp_bridge_driver intel_830_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .aperture_sizes = intel_fake_agp_sizes, - .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .configure = intel_fake_agp_configure, - .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_gtt_cleanup, - .agp_enable = intel_fake_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_fake_agp_create_gatt_table, - .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_fake_agp_insert_entries, - .remove_memory = intel_fake_agp_remove_entries, - .alloc_by_type = intel_fake_agp_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_fake_agp_chipset_flush, -}; - -static const struct agp_bridge_driver intel_915_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .aperture_sizes = intel_fake_agp_sizes, - .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .configure = intel_fake_agp_configure, - .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_gtt_cleanup, - .agp_enable = intel_fake_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_fake_agp_create_gatt_table, - .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_fake_agp_insert_entries, - .remove_memory = intel_fake_agp_remove_entries, - .alloc_by_type = intel_fake_agp_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_fake_agp_chipset_flush, -}; - -static const struct agp_bridge_driver intel_i965_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .aperture_sizes = intel_fake_agp_sizes, - .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .configure = intel_fake_agp_configure, - .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_gtt_cleanup, - .agp_enable = intel_fake_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_fake_agp_create_gatt_table, - .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_fake_agp_insert_entries, - .remove_memory = intel_fake_agp_remove_entries, - .alloc_by_type = intel_fake_agp_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_fake_agp_chipset_flush, -}; - -static const struct agp_bridge_driver intel_gen6_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .aperture_sizes = intel_fake_agp_sizes, - .num_aperture_sizes = 
ARRAY_SIZE(intel_fake_agp_sizes), - .configure = intel_fake_agp_configure, - .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_gtt_cleanup, - .agp_enable = intel_fake_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_fake_agp_create_gatt_table, - .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_fake_agp_insert_entries, - .remove_memory = intel_fake_agp_remove_entries, - .alloc_by_type = intel_fake_agp_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_fake_agp_chipset_flush, -}; - -static const struct agp_bridge_driver intel_g33_driver = { +static const struct agp_bridge_driver intel_fake_agp_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, @@ -1478,81 +1386,81 @@ static const struct intel_gtt_driver_description { { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", - &intel_830_driver , &i8xx_gtt_driver}, + &intel_fake_agp_driver, &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", - &intel_830_driver , &i8xx_gtt_driver}, + &intel_fake_agp_driver, &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82854_IG, "854", - &intel_830_driver , &i8xx_gtt_driver}, + &intel_fake_agp_driver, &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", - &intel_830_driver , &i8xx_gtt_driver}, + &intel_fake_agp_driver, &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82865_IG, "865", - &intel_830_driver , &i8xx_gtt_driver}, + &intel_fake_agp_driver, &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", - &intel_915_driver , &i915_gtt_driver }, + &intel_fake_agp_driver, &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", - &intel_i965_driver , &i965_gtt_driver }, + &intel_fake_agp_driver, &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_G33_IG, "G33", - &intel_g33_driver , &g33_gtt_driver }, + &intel_fake_agp_driver, &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", - &intel_g33_driver , &g33_gtt_driver }, 
+ &intel_fake_agp_driver, &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", - &intel_g33_driver , &g33_gtt_driver }, + &intel_fake_agp_driver, &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", - &intel_g33_driver , &pineview_gtt_driver }, + &intel_fake_agp_driver, &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", - &intel_g33_driver , &pineview_gtt_driver }, + &intel_fake_agp_driver, &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_IG, "B43", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G41_IG, "G41", - &intel_i965_driver , &g4x_gtt_driver }, + &intel_fake_agp_driver, &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver }, + "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver }, + "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, - "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver }, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, { 0, NULL, NULL } }; From aaa62591199162e6496b4f47cac4f5923bc571d1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 11:07:15 +0200 Subject: [PATCH 208/476] agp: kill agp_(unmap|map)_memory DMA remapping was only used by the intel-gtt driver. With that code now folded into the driver, kill the agp generic support for it. 
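After this, the generic bind path is back to a plain insert_memory call, and the dmar scatterlist mapping happens only inside intel-gtt's own insert path, where the chipset generation is known. For reference, the relevant fragment already added earlier in this series:

	/* inside intel_fake_agp_insert_entries(): only gen3+ with the dma api
	 * enabled goes through the scatterlist mapping at all */
	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);	/* no-op if already mapped */
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	}
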
Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/agp.h | 3 --- drivers/char/agp/generic.c | 8 -------- 2 files changed, 11 deletions(-) diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 04ad0bbfaf41..5259065f3c79 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -121,9 +121,6 @@ struct agp_bridge_driver { void (*agp_destroy_pages)(struct agp_memory *); int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); void (*chipset_flush)(struct agp_bridge_data *); - - int (*agp_map_memory)(struct agp_memory *mem); - void (*agp_unmap_memory)(struct agp_memory *mem); }; struct agp_bridge_data { diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index d2abf5143983..78235ceccfa1 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) curr->is_flushed = true; } - if (curr->bridge->driver->agp_map_memory) { - ret_val = curr->bridge->driver->agp_map_memory(curr); - if (ret_val) - return ret_val; - } ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); if (ret_val != 0) @@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr) if (ret_val != 0) return ret_val; - if (curr->bridge->driver->agp_unmap_memory) - curr->bridge->driver->agp_unmap_memory(curr); - curr->is_bound = false; curr->pg_start = 0; spin_lock(&curr->bridge->mapped_lock); From 0af9e92e779602bdd6d4d19acf63b4802fab91b6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 14:04:03 +0200 Subject: [PATCH 209/476] intel-gtt: clean up gtt size reporting Consolidate everything in intel-gtt.c and also kill the export of intel_max_stolen. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 6 +++++- drivers/gpu/drm/i915/i915_dma.c | 1 - include/drm/intel-gtt.h | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 57dc50488d18..4f84063bccdd 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -41,7 +41,6 @@ /* Max amount of stolen space, anything above will be returned to Linux */ int intel_max_stolen = 32 * 1024 * 1024; -EXPORT_SYMBOL(intel_max_stolen); static const struct aper_size_info_fixed intel_i810_sizes[] = { @@ -756,6 +755,11 @@ static int intel_gtt_init(void) intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + dev_info(&intel_private.bridge_dev->dev, + "detected gtt size: %dK total, %dK mappable\n", + intel_private.base.gtt_total_entries * 4, + intel_private.base.gtt_mappable_entries * 4); + gtt_map_size = intel_private.base.gtt_total_entries * 4; intel_private.gtt = ioremap(intel_private.gtt_bus_addr, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9977a0a5308a..dd7a0de7212c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1200,7 +1200,6 @@ static int i915_load_modeset_init(struct drm_device *dev, /* Basic memrange allocator for stolen space (aka mm.vram) */ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); - DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); /* We're off and running w/KMS */ dev_priv->mm.suspended = 0; diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index b3aa7ab72d09..d3c81946f613 100644 --- a/include/drm/intel-gtt.h +++ 
b/include/drm/intel-gtt.h @@ -2,8 +2,6 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -extern int intel_max_stolen; /* from AGP driver */ - struct intel_gtt { /* Number of stolen gtt entries at the beginning. */ unsigned int gtt_stolen_entries; From 22533b494ff6a812b3e97248cc6c062858396182 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 16:38:55 +0200 Subject: [PATCH 210/476] intel-gtt: store the dma mask size in intel_gtt_driver Storing this explicitly makes for clearer code and hopefully less further confusion. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 4f84063bccdd..f82a2a688bcc 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -73,6 +73,7 @@ struct intel_gtt_driver { unsigned int is_g33 : 1; unsigned int is_pineview : 1; unsigned int is_ironlake : 1; + unsigned int dma_mask_size : 8; /* Chipset specific GTT setup */ int (*setup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); @@ -1309,11 +1310,13 @@ static const struct agp_bridge_driver intel_fake_agp_driver = { static const struct intel_gtt_driver i81x_gtt_driver = { .gen = 1, + .dma_mask_size = 32, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .setup = i830_setup, .write_entry = i830_write_entry, + .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, }; @@ -1322,6 +1325,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .setup = i9xx_setup, /* i945 is the last gpu to need phys mem (for overlay and cursors). */ .write_entry = i830_write_entry, + .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1330,6 +1334,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .is_g33 = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1338,6 +1343,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1345,6 +1351,7 @@ static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .setup = i9xx_setup, .write_entry = i965_write_entry, + .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1352,6 +1359,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, .write_entry = i965_write_entry, + .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1360,6 +1368,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .is_ironlake = 1, .setup = i9xx_setup, .write_entry = i965_write_entry, + .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1367,6 +1376,7 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, .write_entry = gen6_write_entry, + .dma_mask_size = 40, .check_flags = gen6_check_flags, .chipset_flush = i9xx_chipset_flush, }; @@ -1511,13 +1521,7 @@ int intel_gmch_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); - if (intel_private.driver->write_entry == 
gen6_write_entry) - mask = 40; - else if (intel_private.driver->write_entry == i965_write_entry) - mask = 36; - else - mask = 32; - + mask = intel_private.driver->dma_mask_size; if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) dev_err(&intel_private.pcidev->dev, "set gfx device dma mask %d-bit failed!\n", mask); From ae83dd5c7d80e0f9063739a18e270da7207a91e3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 12 Sep 2010 17:11:15 +0200 Subject: [PATCH 211/476] intel-gtt add a cleanup function for chipset specific stuff The old code didn't clean up the i830 chipset flush page. And it looks nicer. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 39 ++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index f82a2a688bcc..9a03815483c7 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -76,6 +76,9 @@ struct intel_gtt_driver { unsigned int dma_mask_size : 8; /* Chipset specific GTT setup */ int (*setup)(void); + /* This should undo anything done in ->setup() save the unmapping + * of the mmio register file, that's done in the generic code. */ + void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this @@ -732,12 +735,8 @@ static void intel_gtt_teardown_scratch_page(void) static void intel_gtt_cleanup(void) { - if (intel_private.i9xx_flush_page) - iounmap(intel_private.i9xx_flush_page); - if (intel_private.resource_valid) - release_resource(&intel_private.ifp_resource); - intel_private.ifp_resource.start = 0; - intel_private.resource_valid = 0; + intel_private.driver->cleanup(); + iounmap(intel_private.gtt); iounmap(intel_private.registers); @@ -766,6 +765,7 @@ static int intel_gtt_init(void) intel_private.gtt = ioremap(intel_private.gtt_bus_addr, gtt_map_size); if (!intel_private.gtt) { + intel_private.driver->cleanup(); iounmap(intel_private.registers); return -ENOMEM; } @@ -775,6 +775,7 @@ static int intel_gtt_init(void) /* we have to call this as early as possible after the MMIO base address is known */ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); if (intel_private.base.gtt_stolen_entries == 0) { + intel_private.driver->cleanup(); iounmap(intel_private.registers); iounmap(intel_private.gtt); return -ENOMEM; @@ -809,7 +810,7 @@ static int intel_fake_agp_fetch_size(void) return 0; } -static void intel_i830_fini_flush(void) +static void i830_cleanup(void) { kunmap(intel_private.i8xx_page); intel_private.i8xx_flush_page = NULL; @@ -831,7 +832,7 @@ static void intel_i830_setup_flush(void) intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); if (!intel_private.i8xx_flush_page) - intel_i830_fini_flush(); + i830_cleanup(); } /* The chipset_flush interface needs to get data that has already been @@ -1174,6 +1175,16 @@ static void intel_i9xx_setup_flush(void) "can't ioremap flush page - no chipset flushing\n"); } +static void i9xx_cleanup(void) +{ + if (intel_private.i9xx_flush_page) + iounmap(intel_private.i9xx_flush_page); + if (intel_private.resource_valid) + release_resource(&intel_private.ifp_resource); + intel_private.ifp_resource.start = 0; + intel_private.resource_valid = 0; +} + static void i9xx_chipset_flush(void) { if (intel_private.i9xx_flush_page) @@ -1217,6 +1228,10 @@ static void 
gen6_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | pte_flags, intel_private.gtt + entry); } +static void gen6_cleanup(void) +{ +} + static int i9xx_setup(void) { u32 reg_addr; @@ -1315,6 +1330,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = { static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .setup = i830_setup, + .cleanup = i830_cleanup, .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, @@ -1323,6 +1339,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). */ .write_entry = i830_write_entry, .dma_mask_size = 32, @@ -1333,6 +1350,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, .is_g33 = 1, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, @@ -1342,6 +1360,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, @@ -1350,6 +1369,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, @@ -1358,6 +1378,7 @@ static const struct intel_gtt_driver i965_gtt_driver = { static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, @@ -1367,6 +1388,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, .setup = i9xx_setup, + .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, @@ -1375,6 +1397,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, + .cleanup = gen6_cleanup, .write_entry = gen6_write_entry, .dma_mask_size = 40, .check_flags = gen6_check_flags, From 6eecba33f2fc24544073631dc1b23b7a312e644b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Sep 2010 09:45:11 +0100 Subject: [PATCH 212/476] drm/i915: Disable output polling across suspend & resume Suspending (especially hibernating) may take a finite amount of time, during which a hotplug event may trigger and we will attempt to handle it with inconsistent state. Disable hotplug polling around suspend and resume. 
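The ordering is the whole fix: polling must already be off when the freeze starts, and may only come back after thaw has rebuilt consistent state, so the calls move from the switcheroo handler into the suspend/resume entry points. Stripped down to that ordering (the real hunks below keep the PM-event check and PCI handling):

	/* suspend path */
	drm_kms_helper_poll_disable(dev);	/* no hotplug processing from here on */
	error = i915_drm_freeze(dev);

	/* resume path */
	ret = i915_drm_thaw(dev);		/* device state is consistent again ... */
	if (ret == 0)
		drm_kms_helper_poll_enable(dev);	/* ... so polling may restart */
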
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30070 Reported-by: Rui Tiago Matos Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 -- drivers/gpu/drm/i915/i915_drv.c | 11 ++++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index dd7a0de7212c..048c54bdfd4c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1172,10 +1172,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); - drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "i915: switched off\n"); - drm_kms_helper_poll_disable(dev); i915_suspend(dev, pmm); } } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2c87f9b97b6f..4e83bb36888e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -281,6 +281,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) if (state.event == PM_EVENT_PRETHAW) return 0; + drm_kms_helper_poll_disable(dev); + error = i915_drm_freeze(dev); if (error) return error; @@ -325,12 +327,19 @@ static int i915_drm_thaw(struct drm_device *dev) int i915_resume(struct drm_device *dev) { + int ret; + if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); - return i915_drm_thaw(dev); + ret = i915_drm_thaw(dev); + if (ret) + return ret; + + drm_kms_helper_poll_enable(dev); + return 0; } static int i965_reset_complete(struct drm_device *dev) From cdd59983118c02d9c5ba0c116ded1faef47ec452 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Sep 2010 16:30:16 +0100 Subject: [PATCH 213/476] drm/i915: Drop crtc->fb pin on disable. In order to handle disable_functions() where the framebuffer is decoupled from the crtc we need to unpin the fb in order to prevent a leak. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=29857 Reported-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 461bf4879e0a..e6f7ebfe86e5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2487,6 +2487,20 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) } } +static void intel_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct drm_device *dev = crtc->dev; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (crtc->fb) { + mutex_lock(&dev->struct_mutex); + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + } +} + /* Prepare for a mode set. * * Note we could be a lot smarter here. 
We need to figure out which outputs @@ -5163,6 +5177,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { .mode_set_base = intel_pipe_set_base, .mode_set_base_atomic = intel_pipe_set_base_atomic, .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, }; static const struct drm_crtc_funcs intel_crtc_funcs = { From a6b17b4367ed5d9bac94bc87d1489de3847fce98 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Sep 2010 12:34:25 +0100 Subject: [PATCH 214/476] drm/i915: Use the correct DPB GMBUS port for GPIOE Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dvo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index ad28bc4fb732..561fbc34cec8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .name = "ch7017", .dvo_reg = DVOC, .slave_addr = 0x75, - .gpio = GMBUS_PORT_DPD, + .gpio = GMBUS_PORT_DPB, .dev_ops = &ch7017_ops, } }; @@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev) else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GMBUS_PORT_PANEL; else - gpio = GMBUS_PORT_DPD; + gpio = GMBUS_PORT_DPB; /* Set up the I2C bus necessary for the chip we're probing. * It appears that everything is on GPIOE except for panels From 4fd21dc8ee6fde52a99042186ff94de1b5e8b43c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Sep 2010 14:06:12 +0100 Subject: [PATCH 215/476] drm/i915/lvds: Unlock the PP register when panel-fitting As we do not wait for the panel to turn off when we need to adjust the panel-fitting registers we also need to unlock the PLLs as with the non-pfit update path. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 02c5aed36c87..2bcea8000859 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -97,6 +97,7 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(PFIT_CONTROL, 0); intel_lvds->pfit_control = 0; + intel_lvds->pfit_dirty = false; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); @@ -377,8 +378,8 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); } else if (intel_lvds->pfit_dirty) { I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) & ~POWER_TARGET_ON); - I915_WRITE(LVDS, I915_READ(LVDS) & ~LVDS_PORT_EN); + (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) + & ~POWER_TARGET_ON); } else { I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); @@ -438,6 +439,9 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. */ + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); From 9e0ae53404700f1e4ae1f33b0ff92948ae0e509d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Sep 2010 15:05:24 +0100 Subject: [PATCH 216/476] drm/i915: Don't overwrite the returned error-code During i915_gem_create_mmap_offset() if the subsystem reports an error code, use it. 
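The pattern being applied, shown in isolation (a sketch drawn from the diff below rather than the full function): capture the callee's return value and pass it up instead of replacing it with a generic -ENOMEM.

/* before: the real failure reason is lost */
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
	DRM_ERROR("failed to add to map hash\n");
	ret = -ENOMEM;
	goto out_free_mm;
}

/* after: whatever drm_ht_insert_item() reports is what the caller sees */
ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
if (ret) {
	DRM_ERROR("failed to add to map hash\n");
	goto out_free_mm;
}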
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 151fa43e4417..734cc08c3fdb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1246,7 +1246,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) obj->size / PAGE_SIZE, 0, 0); if (!list->file_offset_node) { DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); - ret = -ENOMEM; + ret = -ENOSPC; goto out_free_list; } @@ -1258,9 +1258,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) } list->hash.key = list->file_offset_node->start; - if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { + ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); + if (ret) { DRM_ERROR("failed to add to map hash\n"); - ret = -ENOMEM; goto out_free_mm; } From dfaae392f4461785eb1c92aeaf2a1040b184edba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 10:31:52 +0100 Subject: [PATCH 217/476] drm/i915: Clear the gpu_write_list on resetting write_domain upon hang Otherwise we will hit a list handling assertion when moving the object to the inactive list. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 16 +-------- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 59 ++++++++++++++++++++++++--------- 3 files changed, 46 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4e83bb36888e..2184d29e7a9f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -395,21 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags) mutex_lock(&dev->struct_mutex); - /* - * Clear request list - */ - i915_gem_retire_requests(dev); - - /* Remove anything from the flushing lists. The GPU cache is likely - * to be lost on reset along with the data, so simply move the - * lost bo to the inactive list. - */ - i915_gem_reset_flushing_list(dev); - - /* Move everything out of the GPU domains to ensure we do any - * necessary invalidation upon reuse. 
- */ - i915_gem_reset_inactive_gpu_domains(dev); + i915_gem_reset_lists(dev); /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 12e9f853a5e9..5fec2ca619e8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1005,8 +1005,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_reset_flushing_list(struct drm_device *dev); -void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev); +void i915_gem_reset_lists(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); void i915_gem_flush_ring(struct drm_device *dev, struct drm_file *file_priv, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 734cc08c3fdb..0ce28c71facc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1682,27 +1682,60 @@ i915_get_gem_seqno(struct drm_device *dev, return ring->get_gem_seqno(dev, ring); } -void i915_gem_reset_flushing_list(struct drm_device *dev) +static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; + while (!list_empty(&ring->request_list)) { + struct drm_i915_gem_request *request; - while (!list_empty(&dev_priv->mm.flushing_list)) { + request = list_first_entry(&ring->request_list, + struct drm_i915_gem_request, + list); + + list_del(&request->list); + list_del(&request->client_list); + kfree(request); + } + + while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj_priv; + obj_priv = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + list); + + obj_priv->base.write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); + i915_gem_object_move_to_inactive(&obj_priv->base); + } +} + +void i915_gem_reset_lists(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); + if (HAS_BSD(dev)) + i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + + /* Remove anything from the flushing lists. The GPU cache is likely + * to be lost on reset along with the data, so simply move the + * lost bo to the inactive list. + */ + while (!list_empty(&dev_priv->mm.flushing_list)) { obj_priv = list_first_entry(&dev_priv->mm.flushing_list, struct drm_i915_gem_object, list); obj_priv->base.write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); i915_gem_object_move_to_inactive(&obj_priv->base); } -} - -void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + /* Move everything out of the GPU domains to ensure we do any + * necessary invalidation upon reuse. 
+ */ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) @@ -1720,15 +1753,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; - bool wedged; if (!ring->status_page.page_addr || list_empty(&ring->request_list)) return; seqno = i915_get_gem_seqno(dev, ring); - wedged = atomic_read(&dev_priv->mm.wedged); - while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1736,7 +1766,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, struct drm_i915_gem_request, list); - if (!wedged && !i915_seqno_passed(seqno, request->seqno)) + if (!i915_seqno_passed(seqno, request->seqno)) break; trace_i915_gem_request_retire(dev, request->seqno); @@ -1757,8 +1787,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, struct drm_i915_gem_object, list); - if (!wedged && - !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) break; obj = &obj_priv->base; From 2896b5397570f6857fd5d0e0533f640b05b1d162 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 10:54:48 +0100 Subject: [PATCH 218/476] drm/i915: Don't offset the pin used for crt_ddc Previously when converting the GMBUS pin to the GPIO reg, we would offset the pin by one and then use the look-up table. Now that we first try to use the GMBUS pin, we no longer need the offset and can use the value from the VBIOS directly. Reported-by: Carlos R. Mafra Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_bios.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index d11bbcad4fea..123e31d5a80b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -299,7 +299,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); if (bus_pin >= 1 && bus_pin <= 6) - dev_priv->crt_ddc_pin = bus_pin - 1; + dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", block_size); From 5c12a07e8073295ce8b57a822f811ac34e4f8420 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 11:22:30 +0100 Subject: [PATCH 219/476] drm/i915: Drop ring->lazy_request We are not currently using it as intended, so remove the complication. 
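In outline, marking an object active now just records the next global seqno rather than arming a lazy request. This is a sketch condensed from the diff below, with the reference handling elided.

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	obj_priv->ring = ring;
	/* ... reference handling unchanged ... */

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list, &ring->active_list);
	obj_priv->last_rendering_seqno = dev_priv->next_seqno;
}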
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 37 ++++--------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 ---- 2 files changed, 6 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0ce28c71facc..3fd69ad19aa7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1470,24 +1470,12 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) obj_priv->pages = NULL; } -static uint32_t -i915_gem_next_request_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - ring->outstanding_lazy_request = true; - - return dev_priv->next_seqno; -} - static void i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { - struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = obj->dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); obj_priv->ring = ring; @@ -1500,7 +1488,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, /* Move from whatever list we were on to the tail of execution. */ list_move_tail(&obj_priv->list, &ring->active_list); - obj_priv->last_rendering_seqno = seqno; + obj_priv->last_rendering_seqno = dev_priv->next_seqno; } static void @@ -1945,11 +1933,6 @@ i915_gem_flush_ring(struct drm_device *dev, { ring->flush(dev, ring, invalidate_domains, flush_domains); i915_gem_process_flushing_list(dev, flush_domains, ring); - - if (ring->outstanding_lazy_request) { - (void)i915_add_request(dev, file_priv, NULL, ring); - ring->outstanding_lazy_request = false; - } } static void @@ -2098,7 +2081,7 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. */ - seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring); + seqno = dev_priv->next_seqno; i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); ret = i915_wait_request(dev, seqno, &dev_priv->render_ring); @@ -2106,7 +2089,7 @@ i915_gpu_idle(struct drm_device *dev) return ret; if (HAS_BSD(dev)) { - seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring); + seqno = dev_priv->next_seqno; i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring); @@ -3573,7 +3556,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_request *request = NULL; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; - uint32_t seqno, reloc_index; + uint32_t reloc_index; int pin_tries, flips; struct intel_ring_buffer *ring = NULL; @@ -3854,15 +3837,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, DRM_INFO("%s: move to exec list %p\n", __func__, obj); #endif } - - /* - * Get a seqno representing the execution of the current buffer, - * which we can wait on. We would like to mitigate these interrupts, - * likely by only creating seqnos occasionally (so that we have - * *some* interrupts representing completion of buffers that we can - * wait on when trying to clear up gtt space). 
- */ - seqno = i915_add_request(dev, file_priv, request, ring); + i915_add_request(dev, file_priv, request, ring); request = NULL; #if WATCH_LRU diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2d3165fc1475..bfbc4889909e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -86,11 +86,6 @@ struct intel_ring_buffer { */ struct list_head request_list; - /** - * Do we have some not yet emitted requests outstanding? - */ - bool outstanding_lazy_request; - wait_queue_head_t irq_queue; drm_local_map_t map; }; From a5cad620b36f15ef3aad434712ae290640aae96c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 13:15:10 +0100 Subject: [PATCH 220/476] drm/i915: Disable "disabled FBC" message when a no-op Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e6f7ebfe86e5..b92385498d2c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1110,6 +1110,9 @@ void i8xx_disable_fbc(struct drm_device *dev) /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + fbc_ctl &= ~FBC_CTL_EN; I915_WRITE(FBC_CONTROL, fbc_ctl); From 6ec3d0c0e9c0c605696e91048eebaca7b0c36695 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 18:17:01 +0100 Subject: [PATCH 221/476] drm/i915/crt: Use a DDC probe on 0xA0 before load-detect The BIOS writer's guide suggests that a VGA connection will ACK a write to address 0xA0 and that this should be used before doing legacy load-detection. Considering the extreme cost of load-detection, performing an extra DDC seems a risk worth taking. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 39 ++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 8e484c9ac1f5..389fcd2aea1f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -261,6 +261,21 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) +{ + u8 buf; + struct i2c_msg msgs[] = { + { + .addr = 0xA0, + .flags = 0, + .len = 1, + .buf = &buf, + }, + }; + /* DDC monitor detect: Does it ACK a write to 0xA0? 
*/ + return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; +} + static bool intel_crt_detect_ddc(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); @@ -270,7 +285,17 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) if (intel_encoder->type != INTEL_OUTPUT_ANALOG) return false; - return intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin); + if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { + DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n"); + return true; + } + + if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) { + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); + return true; + } + + return false; } static enum drm_connector_status @@ -296,6 +321,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder uint8_t st00; enum drm_connector_status status; + DRM_DEBUG_KMS("starting load-detect on CRT\n"); + if (pipe == 0) { bclrpat_reg = BCLRPAT_A; vtotal_reg = VTOTAL_A; @@ -412,9 +439,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; if (I915_HAS_HOTPLUG(dev)) { - if (intel_crt_detect_hotplug(connector)) + if (intel_crt_detect_hotplug(connector)) { + DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; - else + } else return connector_status_disconnected; } @@ -431,7 +459,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) crtc = intel_get_load_detect_pipe(encoder, connector, NULL, &dpms_mode); if (crtc) { - status = intel_crt_load_detect(crtc, encoder); + if (intel_crt_detect_ddc(&encoder->base)) + status = connector_status_connected; + else + status = intel_crt_load_detect(crtc, encoder); intel_release_load_detect_pipe(encoder, connector, dpms_mode); } else From 20f0cd55f68e0678909214c60b3595a22124bdb0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 11:00:38 +0100 Subject: [PATCH 222/476] drm/i915: Remove the broken flush_ring from page-flip This is already performed with the pipelined flush, so by the time we schedule the flush in the page-flip, the ring is NULL and we OOPs instead. 
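What remains of the flip-preparation path once the redundant flush is dropped, roughly (a sketch based on the diff below; the locking and error handling around it are elided):

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
	if (ret)
		goto cleanup_work;
	/* ... */
	atomic_inc(&obj_priv->pending_flip);
	work->pending_flip_obj = obj;
	/*
	 * No explicit i915_gem_flush_ring() here any more: the pipelined
	 * flush mentioned in the commit message already covers it, and
	 * obj_priv->ring may be NULL at this point.
	 */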
Reported-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 5 ----- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 7 +------ 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5fec2ca619e8..ac41ca1157a5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1007,11 +1007,6 @@ int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset_lists(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); -void i915_gem_flush_ring(struct drm_device *dev, - struct drm_file *file_priv, - struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3fd69ad19aa7..174e38abc9ef 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1924,7 +1924,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, return i915_do_wait_request(dev, seqno, 1, ring); } -void +static void i915_gem_flush_ring(struct drm_device *dev, struct drm_file *file_priv, struct intel_ring_buffer *ring, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b92385498d2c..16541ee9e1e0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5028,7 +5028,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_unpin_work *work; unsigned long flags, offset; int pipe = intel_crtc->pipe; - u32 was_dirty, pf, pipesrc; + u32 pf, pipesrc; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); @@ -5057,7 +5057,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - was_dirty = obj->write_domain & I915_GEM_GPU_DOMAINS; ret = intel_pin_and_fence_fb_obj(dev, obj, true); if (ret) goto cleanup_work; @@ -5076,10 +5075,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; - /* Schedule the pipelined flush */ - if (was_dirty) - i915_gem_flush_ring(dev, NULL, obj_priv->ring, 0, was_dirty); - if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; From 29e1316ab129f2d3a9ea874e7c9a4cb936f43542 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Sep 2010 19:10:09 +0100 Subject: [PATCH 223/476] drm/i915/tv: Sleep before checking for state changes. We need to wait for the PLLs to settle prior to detecting the state changes. The BIOS writers guide suggests waiting for the next vblank. Reported-by: Carlos R. 
Mafra Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_tv.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 49ab11c667bb..106560bc84db 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1271,8 +1271,12 @@ intel_tv_detect_type (struct intel_tv *intel_tv) I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); + intel_wait_for_vblank(intel_tv->base.base.dev, + to_intel_crtc(intel_tv->base.base.crtc)->pipe); + type = -1; if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { + DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); /* * A B C * 0 1 1 Composite @@ -1289,8 +1293,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv) DRM_DEBUG_KMS("Detected Component TV connection\n"); type = DRM_MODE_CONNECTOR_Component; } else { - DRM_DEBUG_KMS("Unrecognised TV connection: %x\n", - tv_dac); + DRM_DEBUG_KMS("Unrecognised TV connection\n"); } } From 316f60a120a8f1dacb574f705d5faf7eac3e6e2a Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 26 Aug 2010 16:13:49 +0200 Subject: [PATCH 224/476] drm/nouveau: Fix suspend on some nv4x AGP cards. On some nv4x cards (specifically, the ones that use an internal PCIE->AGP bridge) the AGP controller state isn't preserved after a suspend/resume cycle, and the AGP control registers have moved from 0x18xx to 0x100xx, so the FW check in nouveau_mem_reset_agp() doesn't quite work. Check "dev->agp->mode" instead. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 9689d4147686..c14466ba69ba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -355,7 +355,8 @@ nouveau_mem_reset_agp(struct drm_device *dev) /* First of all, disable fast writes, otherwise if it's * already enabled in the AGP bridge and we disable the card's * AGP controller we might be locking ourselves out of it. */ - if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) { + if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) | + dev->agp->mode) & PCI_AGP_COMMAND_FW) { struct drm_agp_info info; struct drm_agp_mode mode; From 4295f188e8297660b498e021caee430a40558d8b Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 26 Aug 2010 23:07:02 +0200 Subject: [PATCH 225/476] drm/nv20: Use the nv30 CRTC bandwidth calculation code. nv2x CRTC FIFOs are as large as in nv3x (4kB it seems), and the FIFO control registers have the same layout: we can make them share the same implementation. Previously we were using the nv1x code, but the calculated FIFO watermarks are usually too low for nv2x and they cause horrible scanout artifacts. They've gone unnoticed until now because we've been leaving one of the bandwidth regs uninitialized (CRE 47, which contains the most significant bits of FFLWM), so everything seemed to work fine except in some cases after a cold boot, depending on the memory bandwidth and pixel clocks used. 
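The resulting dispatch in nouveau_calc_arb(), in outline (a sketch reconstructed from the diff below): nv2x now shares the nv3x arbitration path instead of falling into the nv1x one.

	if (dev_priv->card_type < NV_20)
		nv04_update_arb(dev, vclk, bpp, burst, lwm);
	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
		*burst = 128;
		*lwm = 0x0480;
	} else
		nv20_update_arb(burst, lwm);	/* shared nv2x/nv3x FIFO watermarks */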
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_calc.c | 6 +++--- drivers/gpu/drm/nouveau/nouveau_hw.c | 10 ++++++---- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c index ca85da784846..23d9896962f4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_calc.c +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, } static void -nv30_update_arb(int *burst, int *lwm) +nv20_update_arb(int *burst, int *lwm) { unsigned int fifo_size, burst_size, graphics_lwm; @@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm { struct drm_nouveau_private *dev_priv = dev->dev_private; - if (dev_priv->card_type < NV_30) + if (dev_priv->card_type < NV_20) nv04_update_arb(dev, vclk, bpp, burst, lwm); else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { *burst = 128; *lwm = 0x0480; } else - nv30_update_arb(burst, lwm); + nv20_update_arb(burst, lwm); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index 7b613682e400..f8ec49b5308b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -866,10 +866,11 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_21); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) rd_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (dev_priv->card_type >= NV_30) rd_cio_state(dev, head, regp, 0x9f); - } rd_cio_state(dev, head, regp, NV_CIO_CRE_49); rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); @@ -976,10 +977,11 @@ nv_load_state_ext(struct drm_device *dev, int head, wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) wr_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (dev_priv->card_type >= NV_30) wr_cio_state(dev, head, regp, 0x9f); - } wr_cio_state(dev, head, regp, NV_CIO_CRE_49); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 497df8765f28..932c914743fc 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -826,7 +826,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) { regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); } From be72ae26b11478c00c64858c86b647b438791671 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 26 Aug 2010 11:32:01 +1000 Subject: [PATCH 226/476] drm/nv50: add new accelerated bo move function Hopefully this one will be better able to cope with moving tiled buffers around without getting them all scrambled as a result.
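At the top level the move entry point now just selects a per-generation copy routine; the tiled-buffer handling lives in the new nv50_bo_move_m2mf(). A condensed sketch of the dispatcher, taken from the diff below:

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm)
		chan = dev_priv->channel;

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret)
		return ret;

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
					     no_wait_reserve, no_wait_gpu,
					     new_mem);
}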
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 193 ++++++++++++++++++++------- 1 file changed, 145 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f6f44779d82f..a2908a91495c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -478,10 +478,12 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, } static inline uint32_t -nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, - struct ttm_mem_reg *mem) +nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, + struct nouveau_channel *chan, struct ttm_mem_reg *mem) { - if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) { + struct nouveau_bo *nvbo = nouveau_bo(bo); + + if (nvbo->no_vm) { if (mem->mem_type == TTM_PL_TT) return NvDmaGART; return NvDmaVRAM; @@ -493,86 +495,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, } static int -nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) +nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct nouveau_bo *nvbo = nouveau_bo(bo); struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct ttm_mem_reg *old_mem = &bo->mem; - struct nouveau_channel *chan; - uint64_t src_offset, dst_offset; - uint32_t page_count; + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 length = (new_mem->num_pages << PAGE_SHIFT); + u64 src_offset, dst_offset; int ret; - chan = nvbo->channel; - if (!chan || nvbo->tile_flags || nvbo->no_vm) - chan = dev_priv->channel; - src_offset = old_mem->mm_node->start << PAGE_SHIFT; dst_offset = new_mem->mm_node->start << PAGE_SHIFT; - if (chan != dev_priv->channel) { - if (old_mem->mem_type == TTM_PL_TT) - src_offset += dev_priv->vm_gart_base; - else + if (!nvbo->no_vm) { + if (old_mem->mem_type == TTM_PL_VRAM) src_offset += dev_priv->vm_vram_base; - - if (new_mem->mem_type == TTM_PL_TT) - dst_offset += dev_priv->vm_gart_base; else + src_offset += dev_priv->vm_gart_base; + + if (new_mem->mem_type == TTM_PL_VRAM) dst_offset += dev_priv->vm_vram_base; + else + dst_offset += dev_priv->vm_gart_base; } ret = RING_SPACE(chan, 3); if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); - OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem)); - OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem)); - if (dev_priv->card_type >= NV_50) { - ret = RING_SPACE(chan, 4); + BEGIN_RING(chan, NvSubM2MF, 0x0184, 2); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + + while (length) { + u32 amount, stride, height; + + amount = min(length, (u64)(16 * 1024 * 1024)); + stride = 64 * 4; + height = amount / stride; + + if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0x20); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); + OUT_RING (chan, 1); + } + if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + 
BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0x20); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); + OUT_RING (chan, 1); + } + + ret = RING_SPACE(chan, 14); if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); - OUT_RING(chan, 1); - BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); - OUT_RING(chan, 1); + + BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + BEGIN_RING(chan, NvSubM2MF, 0x030c, 8); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (chan, 0); + + length -= amount; + src_offset += amount; + dst_offset += amount; } + return 0; +} + +static int +nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT; + u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT; + u32 page_count = new_mem->num_pages; + int ret; + + ret = RING_SPACE(chan, 3); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + page_count = new_mem->num_pages; while (page_count) { int line_count = (page_count > 2047) ? 
2047 : page_count; - if (dev_priv->card_type >= NV_50) { - ret = RING_SPACE(chan, 3); - if (ret) - return ret; - BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); - OUT_RING(chan, upper_32_bits(src_offset)); - OUT_RING(chan, upper_32_bits(dst_offset)); - } ret = RING_SPACE(chan, 11); if (ret) return ret; + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); - OUT_RING(chan, lower_32_bits(src_offset)); - OUT_RING(chan, lower_32_bits(dst_offset)); - OUT_RING(chan, PAGE_SIZE); /* src_pitch */ - OUT_RING(chan, PAGE_SIZE); /* dst_pitch */ - OUT_RING(chan, PAGE_SIZE); /* line_length */ - OUT_RING(chan, line_count); - OUT_RING(chan, (1<<8)|(1<<0)); - OUT_RING(chan, 0); + OUT_RING (chan, src_offset); + OUT_RING (chan, dst_offset); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); - OUT_RING(chan, 0); + OUT_RING (chan, 0); page_count -= line_count; src_offset += (PAGE_SIZE * line_count); dst_offset += (PAGE_SIZE * line_count); } + return 0; +} + +static int +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_channel *chan; + int ret; + + chan = nvbo->channel; + if (!chan || nvbo->no_vm) + chan = dev_priv->channel; + + if (dev_priv->card_type < NV_50) + ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + else + ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + if (ret) + return ret; + return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); } From b8a6a8045d4d3e5b7330603c35cbfaf9f3cc2e50 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 Aug 2010 11:55:43 +1000 Subject: [PATCH 227/476] drm/nouveau: move check for no-op bo move before memcpy fallback Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a2908a91495c..ab661f459fe4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -816,12 +816,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - /* Software copy if the card isn't up and running yet. */ - if (!dev_priv->channel) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - goto out; - } - /* Fake bo copy. */ if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { BUG_ON(bo->mem.mm_node != NULL); @@ -830,6 +824,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; } + /* Software copy if the card isn't up and running yet. */ + if (!dev_priv->channel) { + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + goto out; + } + /* Hardware assisted copy. 
*/ if (new_mem->mem_type == TTM_PL_SYSTEM) ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); From e694438d2f55d71da3491b1a2472383cec4b8236 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 Aug 2010 11:58:49 +1000 Subject: [PATCH 228/476] drm/nouveau: remove second map of notifier bo Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dma.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 2e3c6caa97ee..2d006993378a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -91,11 +91,6 @@ nouveau_dma_init(struct nouveau_channel *chan) if (ret) return ret; - /* Map M2MF notifier object - fbcon. */ - ret = nouveau_bo_map(chan->notifier_bo); - if (ret) - return ret; - /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); if (ret) From 9d59e8a1fc8fc35bf22dc92d7722a7502769ab7e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 Aug 2010 13:04:41 +1000 Subject: [PATCH 229/476] drm/nouveau: require explicit unmap of kmapped bos Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 5 ++--- drivers/gpu/drm/nouveau/nouveau_channel.c | 1 + drivers/gpu/drm/nouveau/nv04_crtc.c | 1 + drivers/gpu/drm/nouveau/nv50_crtc.c | 2 ++ drivers/gpu/drm/nouveau/nv50_display.c | 1 + 5 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ab661f459fe4..841c63f28867 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -58,8 +58,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - ttm_bo_kunmap(&nvbo->kmap); - if (unlikely(nvbo->gem)) DRM_ERROR("bo %p still attached to GEM object\n", bo); @@ -305,7 +303,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo) void nouveau_bo_unmap(struct nouveau_bo *nvbo) { - ttm_bo_kunmap(&nvbo->kmap); + if (nvbo) + ttm_bo_kunmap(&nvbo->kmap); } u16 diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 0480f064f2c1..3144ddea593e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -310,6 +310,7 @@ nouveau_channel_free(struct nouveau_channel *chan) /* Release the channel's resources */ nouveau_gpuobj_ref_del(dev, &chan->pushbuf); if (chan->pushbuf_bo) { + nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_unpin(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); } diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 932c914743fc..1f0f978d8e9d 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -718,6 +718,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc); } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index bfd4ca2fe7ef..d819eb935a2f 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -338,7 +338,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc) nv50_cursor_fini(nv_crtc); + nouveau_bo_unmap(nv_crtc->lut.nvbo); nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc->mode); 
kfree(nv_crtc); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 612fa6d6a0cb..c87f8744866f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -42,6 +42,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan) *pchan = NULL; nouveau_gpuobj_channel_takedown(chan); + nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); if (chan->user) From 7314dec95c054258d5e5a3d1d55e1fd042f8eaad Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 30 Aug 2010 15:37:57 +0200 Subject: [PATCH 230/476] drm/nv17-nv4x: Fix analog load detection false positive on rare occasions. On some boards the residual current DAC outputs can draw when they're disconnected can be high enough to give a false load detection positive (I've only seen it in the S-video luma output of some cards, but just to be sure). The output line capacitance is limited and sampling twice should fix it reliably. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_dac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index ea3627041ecf..509c05015d50 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) msleep(5); sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + /* do it again just in case it's a residual current */ + sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, From 217275d03d7e1f3e62c7b0d9718df99456b7c356 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 30 Aug 2010 19:55:52 +0200 Subject: [PATCH 231/476] drm/nv40: Try to set up CRE_LCD even if it has unknown bits set. They don't seem to do anything useful, and we really want to program CRE_LCD if we aren't lucky enough to find the right CRTC binding already set. 
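The effect is easiest to see in nv04_dac_prepare(): the guard on the unknown CRE_LCD bits goes away and the CRTC binding is cleared unconditionally. A sketch from the diff below, with the surrounding declarations elided:

static void nv04_dac_prepare(struct drm_encoder *encoder)
{
	/* ... */
	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);
	/* previously skipped whenever unknown bits (0x3f, 0x50, ...) were set */
	crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
}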
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_dac.c | 8 +------- drivers/gpu/drm/nouveau/nv04_dfp.c | 33 +++++++++++++----------------- drivers/gpu/drm/nouveau/nv17_tv.c | 13 ++++-------- 3 files changed, 19 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 509c05015d50..9cc560c792a4 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -352,15 +352,9 @@ static void nv04_dac_prepare(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_OFF); nv04_dfp_disable(dev, head); - - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44)) - crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0; + crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0; } - static void nv04_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 0d3206a7046c..e331b4faeb10 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -253,26 +253,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder) nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(*cr_lcd & 0x44)) { - *cr_lcd = 0x3; + *cr_lcd = 0x3; - if (nv_two_heads(dev)) { - if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) - *cr_lcd |= head ? 0x0 : 0x8; - else { - *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; - if (nv_encoder->dcb->type == OUTPUT_LVDS) - *cr_lcd |= 0x30; - if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { - /* avoid being connected to both crtcs */ - *cr_lcd_oth &= ~0x30; - NVWriteVgaCrtc(dev, head ^ 1, - NV_CIO_CRE_LCD__INDEX, - *cr_lcd_oth); - } + if (nv_two_heads(dev)) { + if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) + *cr_lcd |= head ? 0x0 : 0x8; + else { + *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; + if (nv_encoder->dcb->type == OUTPUT_LVDS) + *cr_lcd |= 0x30; + if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { + /* avoid being connected to both crtcs */ + *cr_lcd_oth &= ~0x30; + NVWriteVgaCrtc(dev, head ^ 1, + NV_CIO_CRE_LCD__INDEX, + *cr_lcd_oth); } } } diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 13cdc05b7c2d..703c188c32d6 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -408,15 +408,10 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) } - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(*cr_lcd & 0x44)) { - if (tv_norm->kind == CTV_ENC_MODE) - *cr_lcd = 0x1 | (head ? 0x0 : 0x8); - else - *cr_lcd = 0; - } + if (tv_norm->kind == CTV_ENC_MODE) + *cr_lcd = 0x1 | (head ? 
0x0 : 0x8); + else + *cr_lcd = 0; /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; From 2a7fdb2bc15b3bfc824eb969b3dc7efa3fb52463 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 30 Aug 2010 16:14:51 +1000 Subject: [PATCH 232/476] drm/nouveau: have nv_mask return original register value Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b1be617373b6..c684686f6c0f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1255,12 +1255,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val) iowrite32_native(val, dev_priv->mmio + reg); } -static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) +static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) { u32 tmp = nv_rd32(dev, reg); - tmp &= ~mask; - tmp |= val; - nv_wr32(dev, reg, tmp); + nv_wr32(dev, reg, (tmp & ~mask) | val); + return tmp; } static inline u8 nv_rd08(struct drm_device *dev, unsigned reg) From 479dcaea09bf17e8de7005015345e4266723666d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:28 +1000 Subject: [PATCH 233/476] drm/nouveau: move ramht code out of nouveau_object.c, nothing to see here Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_object.c | 132 +------------------ drivers/gpu/drm/nouveau/nouveau_ramht.c | 160 +++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_ramht.h | 31 +++++ 4 files changed, 193 insertions(+), 132 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_ramht.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_ramht.h diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e9b06e4ef2a2..d6cfbf259876 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -9,7 +9,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ - nouveau_dp.o \ + nouveau_dp.o nouveau_ramht.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index b6bcb254f4ab..e658aa2dbe67 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -34,6 +34,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_drm.h" +#include "nouveau_ramht.h" /* NVidia uses context objects to drive drawing operations. 
@@ -65,137 +66,6 @@ The key into the hash table depends on the object handle and channel id and is given as: */ -static uint32_t -nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t hash = 0; - int i; - - NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); - - for (i = 32; i > 0; i -= dev_priv->ramht_bits) { - hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); - handle >>= dev_priv->ramht_bits; - } - - if (dev_priv->card_type < NV_50) - hash ^= channel << (dev_priv->ramht_bits - 4); - hash <<= 3; - - NV_DEBUG(dev, "hash=0x%08x\n", hash); - return hash; -} - -static int -nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, - uint32_t offset) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); - - if (dev_priv->card_type < NV_40) - return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); - return (ctx != 0); -} - -static int -nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; - uint32_t ctx, co, ho; - - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return -EINVAL; - } - - if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | - (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); - } else - if (dev_priv->card_type < NV_50) { - ctx = (ref->instance >> 4) | - (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); - } else { - if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { - ctx = (ref->instance << 10) | 2; - } else { - ctx = (ref->instance >> 4) | - ((ref->gpuobj->engine << - NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); - } - } - - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (!nouveau_ramht_entry_valid(dev, ramht, co)) { - NV_DEBUG(dev, - "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, ctx); - nv_wo32(dev, ramht, (co + 0)/4, ref->handle); - nv_wo32(dev, ramht, (co + 4)/4, ctx); - - list_add_tail(&ref->list, &chan->ramht_refs); - instmem->flush(dev); - return 0; - } - NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", - chan->id, co, nv_ro32(dev, ramht, co/4)); - - co += 8; - if (co >= dev_priv->ramht_size) - co = 0; - } while (co != ho); - - NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); - return -ENOMEM; -} - -static void -nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; - uint32_t co, ho; - - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return; - } - - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (nouveau_ramht_entry_valid(dev, ramht, co) && - (ref->handle == nv_ro32(dev, ramht, (co/4)))) { - NV_DEBUG(dev, - "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, - nv_ro32(dev, ramht, (co + 4))); - nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); - nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); - - list_del(&ref->list); - instmem->flush(dev); - return; - } - - co += 8; - if (co >= dev_priv->ramht_size) - co = 0; - } while (co != ho); - list_del(&ref->list); - - NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", - chan->id, ref->handle); -} int nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c new file mode 100644 index 000000000000..8b27ee5411b3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -0,0 +1,160 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_ramht.h" + +static uint32_t +nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t hash = 0; + int i; + + NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); + + for (i = 32; i > 0; i -= dev_priv->ramht_bits) { + hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); + handle >>= dev_priv->ramht_bits; + } + + if (dev_priv->card_type < NV_50) + hash ^= channel << (dev_priv->ramht_bits - 4); + hash <<= 3; + + NV_DEBUG(dev, "hash=0x%08x\n", hash); + return hash; +} + +static int +nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, + uint32_t offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); + + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); +} + +int +nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_channel *chan = ref->channel; + struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; + uint32_t ctx, co, ho; + + if (!ramht) { + NV_ERROR(dev, "No hash table!\n"); + return -EINVAL; + } + + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | + (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = (ref->instance >> 4) | + (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { + ctx = (ref->instance << 10) | 2; + } else { + ctx = (ref->instance >> 4) | + ((ref->gpuobj->engine << + NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); + } + } + + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + NV_DEBUG(dev, + "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, ref->handle, ctx); + nv_wo32(dev, ramht, (co + 0)/4, ref->handle); + nv_wo32(dev, ramht, (co + 4)/4, ctx); + + list_add_tail(&ref->list, &chan->ramht_refs); + instmem->flush(dev); + return 0; + } + NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", + chan->id, co, nv_ro32(dev, ramht, co/4)); + + co += 8; + if (co >= dev_priv->ramht_size) + co = 0; + } while (co != ho); + + NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); + return -ENOMEM; +} + +void +nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_channel *chan = ref->channel; + struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; + uint32_t co, ho; + + if (!ramht) { + NV_ERROR(dev, "No hash table!\n"); + return; + } + + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + (ref->handle == nv_ro32(dev, ramht, (co/4)))) { + NV_DEBUG(dev, + "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, ref->handle, + nv_ro32(dev, ramht, (co + 4))); + nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); + nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); + + list_del(&ref->list); + instmem->flush(dev); + return; + } + + co += 8; + if (co >= dev_priv->ramht_size) + co = 0; + } while (co != ho); + list_del(&ref->list); + + NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", + chan->id, ref->handle); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h new file mode 100644 index 000000000000..e10455c6e7ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -0,0 +1,31 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_RAMHT_H__ +#define __NOUVEAU_RAMHT_H__ + +extern int nouveau_ramht_insert(struct drm_device *, struct nouveau_gpuobj_ref *); +extern void nouveau_ramht_remove(struct drm_device *, struct nouveau_gpuobj_ref *); + +#endif From b3beb167af0de6d7cb03aed0687eca645cfd06a6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:29 +1000 Subject: [PATCH 234/476] drm/nouveau: modify object accessors, offset in bytes rather than dwords Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 14 +- drivers/gpu/drm/nouveau/nouveau_grctx.h | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 11 +- drivers/gpu/drm/nouveau/nouveau_object.c | 92 +++-- drivers/gpu/drm/nouveau/nouveau_ramht.c | 16 +- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 50 +-- drivers/gpu/drm/nouveau/nv04_fifo.c | 8 +- drivers/gpu/drm/nouveau/nv20_graph.c | 474 +++++++++++------------ drivers/gpu/drm/nouveau/nv40_graph.c | 4 +- drivers/gpu/drm/nouveau/nv40_grctx.c | 6 +- drivers/gpu/drm/nouveau/nv50_display.c | 14 +- drivers/gpu/drm/nouveau/nv50_fifo.c | 209 +++++----- drivers/gpu/drm/nouveau/nv50_graph.c | 18 +- drivers/gpu/drm/nouveau/nv50_grctx.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 10 +- 15 files changed, 473 insertions(+), 457 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c684686f6c0f..372adfdd9de0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -138,6 +138,7 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_FAKE (1 << 3) struct nouveau_gpuobj { + struct drm_device *dev; struct list_head list; struct nouveau_channel *im_channel; @@ -1291,17 +1292,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val) } /* object access */ -static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj, - unsigned index) -{ - return nv_ri32(dev, obj->im_pramin->start + index * 4); -} - -static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj, - unsigned index, u32 val) -{ - nv_wi32(dev, obj->im_pramin->start + index * 4, val); -} +extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset); +extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val); /* * Logging diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h index 5d39c4ce8006..4a8ad1307fa4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.h +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h @@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val) reg = (reg - 0x00400000) / 4; reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; - nv_wo32(ctx->dev, ctx->data, reg, val); + nv_wo32(ctx->data, reg * 4, val); } #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c14466ba69ba..f34c532bcac3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, virt += (end - pte); while (pte < end) { - nv_wo32(dev, pgt, pte++, offset_l); - nv_wo32(dev, pgt, pte++, offset_h); + nv_wo32(pgt, (pte * 4) + 0, offset_l); + nv_wo32(pgt, (pte * 4) + 4, offset_h); + pte += 2; } } } @@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) pages -= (end - pte); virt += (end - pte) << 15; - while (pte < end) - nv_wo32(dev, pgt, pte++, 0); + while (pte < end) { + nv_wo32(pgt, (pte * 4), 0); + pte++; + } } dev_priv->engine.instmem.flush(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index e658aa2dbe67..52db13cd75b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -88,6 +88,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, if (!gpuobj) return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->dev = dev; gpuobj->flags = flags; gpuobj->im_channel = chan; @@ -134,7 +135,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, int i; for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } @@ -224,7 +225,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } @@ -435,6 +436,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, if (!gpuobj) return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->dev = dev; gpuobj->im_channel = NULL; gpuobj->flags = flags | NVOBJ_FLAG_FAKE; @@ -458,7 +460,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + nv_wo32(gpuobj, i, 0); dev_priv->engine.instmem.flush(dev); } @@ -555,14 +557,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, adjust = offset & 0x00000fff; frame = offset & ~0x00000fff; - nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) | - (adjust << 20) | - (access << 14) | - (target << 16) | - class)); - nv_wo32(dev, *gpuobj, 1, size - 1); - nv_wo32(dev, *gpuobj, 2, frame | pte_flags); - nv_wo32(dev, *gpuobj, 3, frame | pte_flags); + nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) | + (access << 14) | (target << 16) | + class)); + nv_wo32(*gpuobj, 4, size - 1); + nv_wo32(*gpuobj, 8, frame | pte_flags); + nv_wo32(*gpuobj, 12, frame | pte_flags); } else { uint64_t limit = offset + size - 1; uint32_t flags0, flags5; @@ -575,12 +575,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, flags5 = 0x00080000; } - nv_wo32(dev, *gpuobj, 0, flags0 | class); - nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit)); - nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset)); - nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) | - (upper_32_bits(offset) & 0xff)); - nv_wo32(dev, *gpuobj, 5, flags5); + nv_wo32(*gpuobj, 0, flags0 | class); + nv_wo32(*gpuobj, 4, lower_32_bits(limit)); + nv_wo32(*gpuobj, 8, lower_32_bits(offset)); + nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | + (upper_32_bits(offset) & 0xff)); + nv_wo32(*gpuobj, 20, flags5); } instmem->flush(dev); @@ -699,25 +699,25 @@ 
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, } if (dev_priv->card_type >= NV_50) { - nv_wo32(dev, *gpuobj, 0, class); - nv_wo32(dev, *gpuobj, 5, 0x00010000); + nv_wo32(*gpuobj, 0, class); + nv_wo32(*gpuobj, 20, 0x00010000); } else { switch (class) { case NV_CLASS_NULL: - nv_wo32(dev, *gpuobj, 0, 0x00001030); - nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF); + nv_wo32(*gpuobj, 0, 0x00001030); + nv_wo32(*gpuobj, 4, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(*gpuobj, 0, class); #ifdef __BIG_ENDIAN - nv_wo32(dev, *gpuobj, 2, 0x01000000); + nv_wo32(*gpuobj, 8, 0x01000000); #endif } else { #ifdef __BIG_ENDIAN - nv_wo32(dev, *gpuobj, 0, class | 0x00080000); + nv_wo32(*gpuobj, 0, class | 0x00080000); #else - nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(*gpuobj, 0, class); #endif } } @@ -836,21 +836,20 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (ret) return ret; for (i = 0; i < 0x4000; i += 8) { - nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); - nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); + nv_wo32(chan->vm_pd, i + 0, 0x00000000); + nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); } - pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; + pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->gart_info.sg_ctxdma, &chan->vm_gart_pt); if (ret) return ret; - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_gart_pt->instance | 0x03); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3); + nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; + pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->vm_vram_pt[i], @@ -858,9 +857,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (ret) return ret; - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_vram_pt[i]->instance | 0x61); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + nv_wo32(chan->vm_pd, pde + 0, + chan->vm_vram_pt[i]->instance | 0x61); + nv_wo32(chan->vm_pd, pde + 4, 0x00000000); + pde += 8; } instmem->flush(dev); @@ -996,8 +996,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev) return -ENOMEM; } - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); } return 0; @@ -1042,8 +1042,8 @@ nouveau_gpuobj_resume(struct drm_device *dev) if (!gpuobj->im_backing_suspend) continue; - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); dev_priv->engine.instmem.flush(dev); } @@ -1120,3 +1120,17 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, return 0; } + +u32 +nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) +{ + struct drm_device *dev = gpuobj->dev; + return nv_ri32(dev, gpuobj->im_pramin->start + offset); +} + +void +nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) +{ + struct drm_device *dev = gpuobj->dev; + nv_wi32(dev, gpuobj->im_pramin->start + offset, val); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 8b27ee5411b3..e5cc93c55d80 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -54,7 +54,7 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, uint32_t offset) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); + uint32_t ctx = nv_ro32(ramht, offset + 4); if (dev_priv->card_type < NV_40) return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); @@ -100,15 +100,15 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) NV_DEBUG(dev, "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", chan->id, co, ref->handle, ctx); - nv_wo32(dev, ramht, (co + 0)/4, ref->handle); - nv_wo32(dev, ramht, (co + 4)/4, ctx); + nv_wo32(ramht, co + 0, ref->handle); + nv_wo32(ramht, co + 4, ctx); list_add_tail(&ref->list, &chan->ramht_refs); instmem->flush(dev); return 0; } NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", - chan->id, co, nv_ro32(dev, ramht, co/4)); + chan->id, co, nv_ro32(ramht, co)); co += 8; if (co >= dev_priv->ramht_size) @@ -136,13 +136,13 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && - (ref->handle == nv_ro32(dev, ramht, (co/4)))) { + (ref->handle == nv_ro32(ramht, co))) { NV_DEBUG(dev, "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", chan->id, co, ref->handle, - nv_ro32(dev, ramht, (co + 4))); - nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); - nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); + nv_ro32(ramht, co + 4)); + nv_wo32(ramht, co + 0, 0x00000000); + nv_wo32(ramht, co + 4, 0x00000000); list_del(&ref->list); instmem->flush(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 6b9187d7f67d..630988af801c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) uint32_t offset_h = upper_32_bits(dma_offset); for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) - nv_wo32(dev, gpuobj, pte++, offset_l | 3); - else { - nv_wo32(dev, gpuobj, pte++, offset_l | 0x21); - nv_wo32(dev, gpuobj, pte++, offset_h & 0xff); + if (dev_priv->card_type < NV_50) { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); + pte += 1; + } else { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21); + nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff); + pte += 2; } dma_offset += NV_CTXDMA_PAGE_SIZE; @@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be) dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) - nv_wo32(dev, gpuobj, pte++, dma_offset | 3); - else { - nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21); - nv_wo32(dev, gpuobj, pte++, 0x00000000); + if (dev_priv->card_type < NV_50) { + nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); + pte += 1; + } else { + nv_wo32(gpuobj, (pte * 4), dma_offset | 0x21); + nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); + pte += 2; } dma_offset += NV_CTXDMA_PAGE_SIZE; @@ -258,21 +262,21 @@ nouveau_sgdma_init(struct drm_device *dev) /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE * on those cards? 
*/ - nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | - (1 << 12) /* PT present */ | - (0 << 13) /* PT *not* linear */ | - (NV_DMA_ACCESS_RW << 14) | - (NV_DMA_TARGET_PCI << 16)); - nv_wo32(dev, gpuobj, 1, aper_size - 1); + nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + nv_wo32(gpuobj, 4, aper_size - 1); for (i = 2; i < 2 + (aper_size >> 12); i++) { - nv_wo32(dev, gpuobj, i, - dev_priv->gart_info.sg_dummy_bus | 3); + nv_wo32(gpuobj, i * 4, + dev_priv->gart_info.sg_dummy_bus | 3); } } else { for (i = 0; i < obj_size; i += 8) { - nv_wo32(dev, gpuobj, (i+0)/4, - dev_priv->gart_info.sg_dummy_bus | 0x21); - nv_wo32(dev, gpuobj, (i+4)/4, 0); + nv_wo32(gpuobj, i + 0, + dev_priv->gart_info.sg_dummy_bus | 0x21); + nv_wo32(gpuobj, i + 4, 0); } } dev_priv->engine.instmem.flush(dev); @@ -308,9 +312,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; int pte; - pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; if (dev_priv->card_type < NV_50) { - *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 06cedd99c26a..bbb87ef262c0 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -38,10 +38,10 @@ #define NV04_RAMFC_ENGINE 0x14 #define NV04_RAMFC_PULL1_ENGINE 0x18 -#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset/4, (val)) -#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset/4) +#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc->gpuobj, \ + NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) nv_ro32(chan->ramfc->gpuobj, \ + NV04_RAMFC_##offset) void nv04_fifo_disable(struct drm_device *dev) diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 17f309b36c91..d8693d32bd0e 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x047c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0490/4, 0x00000111); - nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + nv_wo32(ctx, 0x033c, 0xffff0000); + nv_wo32(ctx, 0x03a0, 0x0fff0000); + nv_wo32(ctx, 0x03a4, 0x0fff0000); + nv_wo32(ctx, 0x047c, 0x00000101); + nv_wo32(ctx, 0x0490, 0x00000111); + nv_wo32(ctx, 0x04a8, 0x44400000); for (i = 0x04d4; i <= 0x04e0; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x04f4; i <= 0x0500; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x050c; i <= 0x0518; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x051c; i <= 0x0528; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x052c; i <= 0x0538; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x055c; i <= 0x0598; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); - nv_wo32(dev, 
ctx, 0x0604/4, 0x00004000); - nv_wo32(dev, ctx, 0x0610/4, 0x00000001); - nv_wo32(dev, ctx, 0x0618/4, 0x00040000); - nv_wo32(dev, ctx, 0x061c/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05a4, 0x4b7fffff); + nv_wo32(ctx, 0x05fc, 0x00000001); + nv_wo32(ctx, 0x0604, 0x00004000); + nv_wo32(ctx, 0x0610, 0x00000001); + nv_wo32(ctx, 0x0618, 0x00040000); + nv_wo32(ctx, 0x061c, 0x00010000); for (i = 0x1c1c; i <= 0x248c; i += 16) { - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x281c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2830/4, 0x3f800000); - nv_wo32(dev, ctx, 0x285c/4, 0x40000000); - nv_wo32(dev, ctx, 0x2860/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2864/4, 0x3f000000); - nv_wo32(dev, ctx, 0x286c/4, 0x40000000); - nv_wo32(dev, ctx, 0x2870/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2878/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2880/4, 0xbf800000); - nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000); - nv_wo32(dev, ctx, 0x3530/4, 0x000003f8); - nv_wo32(dev, ctx, 0x3540/4, 0x002fe000); + nv_wo32(ctx, 0x281c, 0x3f800000); + nv_wo32(ctx, 0x2830, 0x3f800000); + nv_wo32(ctx, 0x285c, 0x40000000); + nv_wo32(ctx, 0x2860, 0x3f800000); + nv_wo32(ctx, 0x2864, 0x3f000000); + nv_wo32(ctx, 0x286c, 0x40000000); + nv_wo32(ctx, 0x2870, 0x3f800000); + nv_wo32(ctx, 0x2878, 0xbf800000); + nv_wo32(ctx, 0x2880, 0xbf800000); + nv_wo32(ctx, 0x34a4, 0x000fe000); + nv_wo32(ctx, 0x3530, 0x000003f8); + nv_wo32(ctx, 0x3540, 0x002fe000); for (i = 0x355c; i <= 0x3578; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x035c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x049c/4, 0x00000101); - nv_wo32(dev, ctx, 0x04b0/4, 0x00000111); - nv_wo32(dev, ctx, 0x04c8/4, 0x00000080); - nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000); - nv_wo32(dev, ctx, 0x04d0/4, 0x00000001); - nv_wo32(dev, ctx, 0x04e4/4, 0x44400000); - nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000); + nv_wo32(ctx, 0x035c, 0xffff0000); + nv_wo32(ctx, 0x03c0, 0x0fff0000); + nv_wo32(ctx, 0x03c4, 0x0fff0000); + nv_wo32(ctx, 0x049c, 0x00000101); + nv_wo32(ctx, 0x04b0, 0x00000111); + nv_wo32(ctx, 0x04c8, 0x00000080); + nv_wo32(ctx, 0x04cc, 0xffff0000); + nv_wo32(ctx, 0x04d0, 0x00000001); + nv_wo32(ctx, 0x04e4, 0x44400000); + nv_wo32(ctx, 0x04fc, 0x4b800000); for (i = 0x0510; i <= 0x051c; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x0530; i <= 0x053c; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x0548; i <= 0x0554; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x0558; i <= 0x0564; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x0568; i <= 0x0574; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x0598; i <= 0x05d4; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x0620/4, 0x00000080); - nv_wo32(dev, ctx, 0x0624/4, 0x30201000); - nv_wo32(dev, ctx, 0x0628/4, 0x70605040); - nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080); - nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0); - nv_wo32(dev, ctx, 0x0664/4, 
0x00000001); - nv_wo32(dev, ctx, 0x066c/4, 0x00004000); - nv_wo32(dev, ctx, 0x0678/4, 0x00000001); - nv_wo32(dev, ctx, 0x0680/4, 0x00040000); - nv_wo32(dev, ctx, 0x0684/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05e0, 0x4b7fffff); + nv_wo32(ctx, 0x0620, 0x00000080); + nv_wo32(ctx, 0x0624, 0x30201000); + nv_wo32(ctx, 0x0628, 0x70605040); + nv_wo32(ctx, 0x062c, 0xb0a09080); + nv_wo32(ctx, 0x0630, 0xf0e0d0c0); + nv_wo32(ctx, 0x0664, 0x00000001); + nv_wo32(ctx, 0x066c, 0x00004000); + nv_wo32(ctx, 0x0678, 0x00000001); + nv_wo32(ctx, 0x0680, 0x00040000); + nv_wo32(ctx, 0x0684, 0x00010000); for (i = 0x1b04; i <= 0x2374; i += 16) { - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x2704/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2718/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2744/4, 0x40000000); - nv_wo32(dev, ctx, 0x2748/4, 0x3f800000); - nv_wo32(dev, ctx, 0x274c/4, 0x3f000000); - nv_wo32(dev, ctx, 0x2754/4, 0x40000000); - nv_wo32(dev, ctx, 0x2758/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2760/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2768/4, 0xbf800000); - nv_wo32(dev, ctx, 0x308c/4, 0x000fe000); - nv_wo32(dev, ctx, 0x3108/4, 0x000003f8); - nv_wo32(dev, ctx, 0x3468/4, 0x002fe000); + nv_wo32(ctx, 0x2704, 0x3f800000); + nv_wo32(ctx, 0x2718, 0x3f800000); + nv_wo32(ctx, 0x2744, 0x40000000); + nv_wo32(ctx, 0x2748, 0x3f800000); + nv_wo32(ctx, 0x274c, 0x3f000000); + nv_wo32(ctx, 0x2754, 0x40000000); + nv_wo32(ctx, 0x2758, 0x3f800000); + nv_wo32(ctx, 0x2760, 0xbf800000); + nv_wo32(ctx, 0x2768, 0xbf800000); + nv_wo32(ctx, 0x308c, 0x000fe000); + nv_wo32(ctx, 0x3108, 0x000003f8); + nv_wo32(ctx, 0x3468, 0x002fe000); for (i = 0x3484; i <= 0x34a0; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x047c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0490/4, 0x00000111); - nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + nv_wo32(ctx, 0x033c, 0xffff0000); + nv_wo32(ctx, 0x03a0, 0x0fff0000); + nv_wo32(ctx, 0x03a4, 0x0fff0000); + nv_wo32(ctx, 0x047c, 0x00000101); + nv_wo32(ctx, 0x0490, 0x00000111); + nv_wo32(ctx, 0x04a8, 0x44400000); for (i = 0x04d4; i <= 0x04e0; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x04f4; i <= 0x0500; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x050c; i <= 0x0518; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x051c; i <= 0x0528; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x052c; i <= 0x0538; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x055c; i <= 0x0598; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); - nv_wo32(dev, ctx, 0x0604/4, 0x00004000); - nv_wo32(dev, ctx, 0x0610/4, 0x00000001); - nv_wo32(dev, ctx, 0x0618/4, 0x00040000); - nv_wo32(dev, ctx, 0x061c/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05a4, 0x4b7fffff); + nv_wo32(ctx, 0x05fc, 0x00000001); + nv_wo32(ctx, 0x0604, 
0x00004000); + nv_wo32(ctx, 0x0610, 0x00000001); + nv_wo32(ctx, 0x0618, 0x00040000); + nv_wo32(ctx, 0x061c, 0x00010000); for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */ - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x269c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26dc/4, 0x40000000); - nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000); - nv_wo32(dev, ctx, 0x26ec/4, 0x40000000); - nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2700/4, 0xbf800000); - nv_wo32(dev, ctx, 0x3024/4, 0x000fe000); - nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8); - nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000); + nv_wo32(ctx, 0x269c, 0x3f800000); + nv_wo32(ctx, 0x26b0, 0x3f800000); + nv_wo32(ctx, 0x26dc, 0x40000000); + nv_wo32(ctx, 0x26e0, 0x3f800000); + nv_wo32(ctx, 0x26e4, 0x3f000000); + nv_wo32(ctx, 0x26ec, 0x40000000); + nv_wo32(ctx, 0x26f0, 0x3f800000); + nv_wo32(ctx, 0x26f8, 0xbf800000); + nv_wo32(ctx, 0x2700, 0xbf800000); + nv_wo32(ctx, 0x3024, 0x000fe000); + nv_wo32(ctx, 0x30a0, 0x000003f8); + nv_wo32(ctx, 0x33fc, 0x002fe000); for (i = 0x341c; i <= 0x3438; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x0410/4, 0x00000101); - nv_wo32(dev, ctx, 0x0424/4, 0x00000111); - nv_wo32(dev, ctx, 0x0428/4, 0x00000060); - nv_wo32(dev, ctx, 0x0444/4, 0x00000080); - nv_wo32(dev, ctx, 0x0448/4, 0xffff0000); - nv_wo32(dev, ctx, 0x044c/4, 0x00000001); - nv_wo32(dev, ctx, 0x0460/4, 0x44400000); - nv_wo32(dev, ctx, 0x048c/4, 0xffff0000); + nv_wo32(ctx, 0x0410, 0x00000101); + nv_wo32(ctx, 0x0424, 0x00000111); + nv_wo32(ctx, 0x0428, 0x00000060); + nv_wo32(ctx, 0x0444, 0x00000080); + nv_wo32(ctx, 0x0448, 0xffff0000); + nv_wo32(ctx, 0x044c, 0x00000001); + nv_wo32(ctx, 0x0460, 0x44400000); + nv_wo32(ctx, 0x048c, 0xffff0000); for (i = 0x04e0; i < 0x04e8; i += 4) - nv_wo32(dev, ctx, i/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x04ec/4, 0x00011100); + nv_wo32(ctx, i, 0x0fff0000); + nv_wo32(ctx, 0x04ec, 0x00011100); for (i = 0x0508; i < 0x0548; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x058c/4, 0x00000080); - nv_wo32(dev, ctx, 0x0590/4, 0x30201000); - nv_wo32(dev, ctx, 0x0594/4, 0x70605040); - nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888); - nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8); - nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x0550, 0x4b7fffff); + nv_wo32(ctx, 0x058c, 0x00000080); + nv_wo32(ctx, 0x0590, 0x30201000); + nv_wo32(ctx, 0x0594, 0x70605040); + nv_wo32(ctx, 0x0598, 0xb8a89888); + nv_wo32(ctx, 0x059c, 0xf8e8d8c8); + nv_wo32(ctx, 0x05b0, 0xb0000000); for (i = 0x0600; i < 0x0640; i += 4) - nv_wo32(dev, ctx, i/4, 0x00010588); + nv_wo32(ctx, i, 0x00010588); for (i = 0x0640; i < 0x0680; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x06c0; i < 0x0700; i += 4) - nv_wo32(dev, ctx, i/4, 0x0008aae4); + nv_wo32(ctx, i, 0x0008aae4); for (i = 0x0700; i < 0x0740; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x0740; i < 0x0780; i += 4) - 
nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x085c/4, 0x00040000); - nv_wo32(dev, ctx, 0x0860/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x085c, 0x00040000); + nv_wo32(ctx, 0x0860, 0x00010000); for (i = 0x0864; i < 0x0874; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x1f18; i <= 0x3088 ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 1, 0x0436086c); + nv_wo32(ctx, i + 2, 0x000c001b); } for (i = 0x30b8; i < 0x30c8; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x344c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3808/4, 0x3f800000); - nv_wo32(dev, ctx, 0x381c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3848/4, 0x40000000); - nv_wo32(dev, ctx, 0x384c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3850/4, 0x3f000000); - nv_wo32(dev, ctx, 0x3858/4, 0x40000000); - nv_wo32(dev, ctx, 0x385c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3864/4, 0xbf800000); - nv_wo32(dev, ctx, 0x386c/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x344c, 0x3f800000); + nv_wo32(ctx, 0x3808, 0x3f800000); + nv_wo32(ctx, 0x381c, 0x3f800000); + nv_wo32(ctx, 0x3848, 0x40000000); + nv_wo32(ctx, 0x384c, 0x3f800000); + nv_wo32(ctx, 0x3850, 0x3f000000); + nv_wo32(ctx, 0x3858, 0x40000000); + nv_wo32(ctx, 0x385c, 0x3f800000); + nv_wo32(ctx, 0x3864, 0xbf800000); + nv_wo32(ctx, 0x386c, 0xbf800000); } static void @@ -254,57 +254,57 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x040c/4, 0x01000101); - nv_wo32(dev, ctx, 0x0420/4, 0x00000111); - nv_wo32(dev, ctx, 0x0424/4, 0x00000060); - nv_wo32(dev, ctx, 0x0440/4, 0x00000080); - nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); - nv_wo32(dev, ctx, 0x0448/4, 0x00000001); - nv_wo32(dev, ctx, 0x045c/4, 0x44400000); - nv_wo32(dev, ctx, 0x0480/4, 0xffff0000); + nv_wo32(ctx, 0x040c, 0x01000101); + nv_wo32(ctx, 0x0420, 0x00000111); + nv_wo32(ctx, 0x0424, 0x00000060); + nv_wo32(ctx, 0x0440, 0x00000080); + nv_wo32(ctx, 0x0444, 0xffff0000); + nv_wo32(ctx, 0x0448, 0x00000001); + nv_wo32(ctx, 0x045c, 0x44400000); + nv_wo32(ctx, 0x0480, 0xffff0000); for (i = 0x04d4; i < 0x04dc; i += 4) - nv_wo32(dev, ctx, i/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x04e0/4, 0x00011100); + nv_wo32(ctx, i, 0x0fff0000); + nv_wo32(ctx, 0x04e0, 0x00011100); for (i = 0x04fc; i < 0x053c; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x057c/4, 0x00000080); - nv_wo32(dev, ctx, 0x0580/4, 0x30201000); - nv_wo32(dev, ctx, 0x0584/4, 0x70605040); - nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888); - nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8); - nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x0544, 0x4b7fffff); + nv_wo32(ctx, 0x057c, 0x00000080); + nv_wo32(ctx, 0x0580, 0x30201000); + nv_wo32(ctx, 0x0584, 0x70605040); + nv_wo32(ctx, 0x0588, 0xb8a89888); + nv_wo32(ctx, 0x058c, 0xf8e8d8c8); + nv_wo32(ctx, 0x05a0, 0xb0000000); for (i = 0x05f0; i < 0x0630; i += 4) - nv_wo32(dev, ctx, i/4, 0x00010588); + nv_wo32(ctx, i, 0x00010588); for (i = 0x0630; i < 0x0670; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x06b0; i < 0x06f0; i += 4) - nv_wo32(dev, ctx, i/4, 0x0008aae4); + nv_wo32(ctx, i, 0x0008aae4); for (i = 0x06f0; i < 0x0730; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x0730; i < 0x0770; i 
+= 4) - nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x0850/4, 0x00040000); - nv_wo32(dev, ctx, 0x0854/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x0850, 0x00040000); + nv_wo32(ctx, 0x0854, 0x00010000); for (i = 0x0858; i < 0x0868; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x15ac; i <= 0x271c ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 1, 0x0436086c); + nv_wo32(ctx, i + 2, 0x000c001b); } for (i = 0x274c; i < 0x275c; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2edc/4, 0x40000000); - nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000); - nv_wo32(dev, ctx, 0x2eec/4, 0x40000000); - nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x2ae0, 0x3f800000); + nv_wo32(ctx, 0x2e9c, 0x3f800000); + nv_wo32(ctx, 0x2eb0, 0x3f800000); + nv_wo32(ctx, 0x2edc, 0x40000000); + nv_wo32(ctx, 0x2ee0, 0x3f800000); + nv_wo32(ctx, 0x2ee4, 0x3f000000); + nv_wo32(ctx, 0x2eec, 0x40000000); + nv_wo32(ctx, 0x2ef0, 0x3f800000); + nv_wo32(ctx, 0x2ef8, 0xbf800000); + nv_wo32(ctx, 0x2f00, 0xbf800000); } static void @@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x040c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0420/4, 0x00000111); - nv_wo32(dev, ctx, 0x0424/4, 0x00000060); - nv_wo32(dev, ctx, 0x0440/4, 0x00000080); - nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); - nv_wo32(dev, ctx, 0x0448/4, 0x00000001); - nv_wo32(dev, ctx, 0x045c/4, 0x44400000); - nv_wo32(dev, ctx, 0x0488/4, 0xffff0000); + nv_wo32(ctx, 0x040c, 0x00000101); + nv_wo32(ctx, 0x0420, 0x00000111); + nv_wo32(ctx, 0x0424, 0x00000060); + nv_wo32(ctx, 0x0440, 0x00000080); + nv_wo32(ctx, 0x0444, 0xffff0000); + nv_wo32(ctx, 0x0448, 0x00000001); + nv_wo32(ctx, 0x045c, 0x44400000); + nv_wo32(ctx, 0x0488, 0xffff0000); for (i = 0x04dc; i < 0x04e4; i += 4) - nv_wo32(dev, ctx, i/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x04e8/4, 0x00011100); + nv_wo32(ctx, i, 0x0fff0000); + nv_wo32(ctx, 0x04e8, 0x00011100); for (i = 0x0504; i < 0x0544; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x0588/4, 0x00000080); - nv_wo32(dev, ctx, 0x058c/4, 0x30201000); - nv_wo32(dev, ctx, 0x0590/4, 0x70605040); - nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888); - nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8); - nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x054c, 0x4b7fffff); + nv_wo32(ctx, 0x0588, 0x00000080); + nv_wo32(ctx, 0x058c, 0x30201000); + nv_wo32(ctx, 0x0590, 0x70605040); + nv_wo32(ctx, 0x0594, 0xb8a89888); + nv_wo32(ctx, 0x0598, 0xf8e8d8c8); + nv_wo32(ctx, 0x05ac, 0xb0000000); for (i = 0x0604; i < 0x0644; i += 4) - nv_wo32(dev, ctx, i/4, 0x00010588); + nv_wo32(ctx, i, 0x00010588); for (i = 0x0644; i < 0x0684; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x06c4; i < 0x0704; i += 4) - nv_wo32(dev, ctx, i/4, 0x0008aae4); + nv_wo32(ctx, i, 0x0008aae4); for (i = 0x0704; i < 0x0744; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x0744; i < 
0x0784; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x0860/4, 0x00040000); - nv_wo32(dev, ctx, 0x0864/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x0860, 0x00040000); + nv_wo32(ctx, 0x0864, 0x00010000); for (i = 0x0868; i < 0x0878; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x1f1c; i <= 0x308c ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 4, 0x0436086c); + nv_wo32(ctx, i + 8, 0x000c001b); } for (i = 0x30bc; i < 0x30cc; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x3450/4, 0x3f800000); - nv_wo32(dev, ctx, 0x380c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3820/4, 0x3f800000); - nv_wo32(dev, ctx, 0x384c/4, 0x40000000); - nv_wo32(dev, ctx, 0x3850/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3854/4, 0x3f000000); - nv_wo32(dev, ctx, 0x385c/4, 0x40000000); - nv_wo32(dev, ctx, 0x3860/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3868/4, 0xbf800000); - nv_wo32(dev, ctx, 0x3870/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x3450, 0x3f800000); + nv_wo32(ctx, 0x380c, 0x3f800000); + nv_wo32(ctx, 0x3820, 0x3f800000); + nv_wo32(ctx, 0x384c, 0x40000000); + nv_wo32(ctx, 0x3850, 0x3f800000); + nv_wo32(ctx, 0x3854, 0x3f000000); + nv_wo32(ctx, 0x385c, 0x40000000); + nv_wo32(ctx, 0x3860, 0x3f800000); + nv_wo32(ctx, 0x3868, 0xbf800000); + nv_wo32(ctx, 0x3870, 0xbf800000); } int @@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); - unsigned int idoffs = 0x28/4; + unsigned int idoffs = 0x28; int ret; switch (dev_priv->chipset) { @@ -413,11 +413,11 @@ nv20_graph_create_context(struct nouveau_channel *chan) ctx_init(dev, chan->ramin_grctx->gpuobj); /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, - (chan->id << 24) | 0x1); /* CTX_USER */ + nv_wo32(chan->ramin_grctx->gpuobj, idoffs, + (chan->id << 24) | 0x1); /* CTX_USER */ - nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, - chan->ramin_grctx->instance >> 4); + nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, + chan->ramin_grctx->instance >> 4); return 0; } @@ -431,7 +431,7 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0); + nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, 0); } int diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index fd7d2b501316..6215dfcf1ea9 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -73,8 +73,8 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx.data = chan->ramin_grctx->gpuobj; nv40_grctx_init(&ctx); - nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, - chan->ramin_grctx->gpuobj->im_pramin->start); + nv_wo32(chan->ramin_grctx->gpuobj, 0, + chan->ramin_grctx->gpuobj->im_pramin->start); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c index 9b5c97469588..ce585093264e 100644 --- a/drivers/gpu/drm/nouveau/nv40_grctx.c +++ b/drivers/gpu/drm/nouveau/nv40_grctx.c @@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx) offset 
+= 0x0280/4; for (i = 0; i < 16; i++, offset += 2) - nv_wo32(dev, obj, offset, 0x3f800000); + nv_wo32(obj, offset * 4, 0x3f800000); for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { for (i = 0; i < vs_nr_b0 * 6; i += 6) - nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001); + nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001); for (i = 0; i < vs_nr_b1 * 4; i += 4) - nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000); + nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000); } } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index c87f8744866f..435d2b727949 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -72,15 +72,15 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, return ret; } - nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); - nv_wo32(dev, obj, 1, limit); - nv_wo32(dev, obj, 2, offset); - nv_wo32(dev, obj, 3, 0x00000000); - nv_wo32(dev, obj, 4, 0x00000000); + nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(obj, 4, limit); + nv_wo32(obj, 8, offset); + nv_wo32(obj, 12, 0x00000000); + nv_wo32(obj, 16, 0x00000000); if (dev_priv->card_type < NV_C0) - nv_wo32(dev, obj, 5, 0x00010000); + nv_wo32(obj, 20, 0x00010000); else - nv_wo32(dev, obj, 5, 0x00020000); + nv_wo32(obj, 20, 0x00020000); dev_priv->engine.instmem.flush(dev); return 0; diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index fb0281ae8f90..38dbcda86196 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -43,8 +43,10 @@ nv50_fifo_playlist_update(struct drm_device *dev) /* We never schedule channel 0 or 127 */ for (i = 1, nr = 0; i < 127; i++) { - if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) - nv_wo32(dev, cur->gpuobj, nr++, i); + if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { + nv_wo32(cur->gpuobj, (nr * 4), i); + nr++; + } } dev_priv->engine.instmem.flush(dev); @@ -258,27 +260,25 @@ nv50_fifo_create_context(struct nouveau_channel *chan) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); - nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->instance >> 4)); - nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); - nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); - nv_wo32(dev, ramfc, 0x40/4, 0x00000000); - nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); - nv_wo32(dev, ramfc, 0x78/4, 0x00000000); - nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078); - nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base + - chan->dma.ib_base * 4); - nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); + nv_wo32(ramfc, 0x48, chan->pushbuf->instance >> 4); + nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->instance >> 4)); + nv_wo32(ramfc, 0x44, 0x2101ffff); + nv_wo32(ramfc, 0x60, 0x7fffffff); + nv_wo32(ramfc, 0x40, 0x00000000); + nv_wo32(ramfc, 0x7c, 0x30000001); + nv_wo32(ramfc, 0x78, 0x00000000); + nv_wo32(ramfc, 0x3c, 0x403f6078); + nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4); + nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16); if (dev_priv->chipset != 0x50) { - nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); - nv_wo32(dev, chan->ramin->gpuobj, 1, - chan->ramfc->instance >> 8); + nv_wo32(chan->ramin->gpuobj, 0, chan->id); + nv_wo32(chan->ramin->gpuobj, 4, chan->ramfc->instance >> 8); - nv_wo32(dev, ramfc, 
0x88/4, chan->cache->instance >> 10); - nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); + nv_wo32(ramfc, 0x88, chan->cache->instance >> 10); + nv_wo32(ramfc, 0x98, chan->ramin->instance >> 12); } dev_priv->engine.instmem.flush(dev); @@ -321,57 +321,57 @@ nv50_fifo_load_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); - nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); - nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); - nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4)); - nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4)); - nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4)); - nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4)); - nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4)); - nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4)); - nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4)); - nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4)); - nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4)); - nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4)); - nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4)); - nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4)); - nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4)); - nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4)); - nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4)); - nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4)); - nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4)); - nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4)); - nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4)); - nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4)); - nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4)); - nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4)); - nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4)); - nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4)); - nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4)); - nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4)); - nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4)); - nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4)); - nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4)); - nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4)); + nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00)); + nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04)); + nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08)); + nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c)); + nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10)); + nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14)); + nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18)); + nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c)); + nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20)); + nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24)); + nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28)); + nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c)); + nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30)); + nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34)); + nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38)); + nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c)); + nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40)); + nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44)); + nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48)); + nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c)); + nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50)); + nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54)); + nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58)); + nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c)); + nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60)); + nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64)); + nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68)); + nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c)); + nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70)); + nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74)); + nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78)); + nv_wr32(dev, 0x2058, 
nv_ro32(ramfc, 0x7c)); + nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80)); - cnt = nv_ro32(dev, ramfc, 0x84/4); + cnt = nv_ro32(ramfc, 0x84); for (ptr = 0; ptr < cnt; ptr++) { nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr), - nv_ro32(dev, cache, (ptr * 2) + 0)); + nv_ro32(cache, (ptr * 8) + 0)); nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), - nv_ro32(dev, cache, (ptr * 2) + 1)); + nv_ro32(cache, (ptr * 8) + 4)); } nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2); nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); /* guessing that all the 0x34xx regs aren't on NV50 */ if (dev_priv->chipset != 0x50) { - nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); - nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); - nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); - nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4)); - nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); + nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88)); + nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c)); + nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90)); + nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94)); + nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98)); } nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); @@ -402,59 +402,60 @@ nv50_fifo_unload_context(struct drm_device *dev) ramfc = chan->ramfc->gpuobj; cache = chan->cache->gpuobj; - nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); - nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); - nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); - nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320)); - nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244)); - nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328)); - nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368)); - nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c)); - nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370)); - nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374)); - nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378)); - nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c)); - nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228)); - nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364)); - nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0)); - nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224)); - nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c)); - nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044)); - nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c)); - nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234)); - nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340)); - nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344)); - nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280)); - nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254)); - nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260)); - nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264)); - nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268)); - nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c)); - nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4)); - nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248)); - nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088)); - nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058)); - nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210)); + nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330)); + nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334)); + nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240)); + nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320)); + nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244)); + nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328)); + nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368)); + nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c)); + nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370)); + nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374)); + nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378)); + nv_wo32(ramfc, 0x2c, nv_rd32(dev, 
0x337c)); + nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228)); + nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364)); + nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0)); + nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224)); + nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c)); + nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044)); + nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c)); + nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234)); + nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340)); + nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344)); + nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280)); + nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254)); + nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260)); + nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264)); + nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268)); + nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c)); + nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4)); + nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248)); + nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088)); + nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058)); + nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210)); put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2; get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2; ptr = 0; while (put != get) { - nv_wo32(dev, cache, ptr++, - nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get))); - nv_wo32(dev, cache, ptr++, - nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get))); + nv_wo32(cache, ptr + 0, + nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get))); + nv_wo32(cache, ptr + 4, + nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get))); get = (get + 1) & 0x1ff; + ptr += 8; } /* guessing that all the 0x34xx regs aren't on NV50 */ if (dev_priv->chipset != 0x50) { - nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); - nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c)); - nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); - nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404)); - nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408)); - nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); + nv_wo32(ramfc, 0x84, ptr >> 3); + nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c)); + nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400)); + nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404)); + nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408)); + nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410)); } dev_priv->engine.instmem.flush(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 1413028e1580..17a8d788a494 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -220,20 +220,20 @@ nv50_graph_create_context(struct nouveau_channel *chan) obj = chan->ramin_grctx->gpuobj; hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; - nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); - nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + - pgraph->grctx_size - 1); - nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); - nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); + nv_wo32(ramin, hdr + 0x00, 0x00190002); + nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->instance + + pgraph->grctx_size - 1); + nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->instance); + nv_wo32(ramin, hdr + 0x0c, 0); + nv_wo32(ramin, hdr + 0x10, 0); + nv_wo32(ramin, hdr + 0x14, 0x00010000); ctx.dev = chan->dev; ctx.mode = NOUVEAU_GRCTX_VALS; ctx.data = obj; nv50_grctx_init(&ctx); - nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12); + nv_wo32(obj, 0x00000, chan->ramin->instance >> 12); dev_priv->engine.instmem.flush(dev); return 0; @@ -252,7 +252,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) return; for (i = hdr; i < hdr + 24; i += 4) - nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); + nv_wo32(chan->ramin->gpuobj, i, 0); dev_priv->engine.instmem.flush(dev); nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index 42a8fb20c1e6..ba6c033c8c95 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -995,7 +995,7 @@ xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { int i; if (val && ctx->mode == NOUVEAU_GRCTX_VALS) for (i = 0; i < num; i++) - nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val); + nv_wo32(ctx->data, (ctx->ctxvals_pos + (i << 3))*4, val); ctx->ctxvals_pos += num << 3; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 91ef93cf1f35..821806c835ba 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -449,9 +449,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } while (pte < pte_end) { - nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); - nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); + nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); + nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); vram += NV50_INSTMEM_PAGE_SIZE; + pte += 2; } dev_priv->engine.instmem.flush(dev); @@ -476,8 +477,9 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; while (pte < pte_end) { - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 0, 0x00000000); + nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 4, 0x00000000); + pte += 2; } dev_priv->engine.instmem.flush(dev); From de3a6c0a3b642c0c350414d63298a1b19a009290 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:30 +1000 Subject: [PATCH 235/476] drm/nouveau: rebase per-channel pramin heap offsets to 0 Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 +++ drivers/gpu/drm/nouveau/nouveau_object.c | 45 ++++++++++++++++-------- drivers/gpu/drm/nouveau/nv50_display.c | 3 +- drivers/gpu/drm/nouveau/nv50_fifo.c | 16 ++++----- 4 files changed, 43 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 372adfdd9de0..b8511c122f5f 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -151,6 +151,10 @@ struct nouveau_gpuobj { uint32_t flags; int refcount; + u32 pinst; + u32 cinst; + u64 vinst; + uint32_t engine; uint32_t class; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 52db13cd75b2..552f5131650f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -131,6 +131,23 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, } } + /* calculate the various different addresses for the object */ + if (chan) { + gpuobj->pinst = gpuobj->im_pramin->start + + chan->ramin->gpuobj->im_pramin->start; + if (dev_priv->card_type < NV_50) { + gpuobj->cinst = gpuobj->pinst; + } else { + gpuobj->cinst = gpuobj->im_pramin->start; + gpuobj->vinst = gpuobj->im_pramin->start + + chan->ramin->gpuobj->im_backing_start; + } + } else { + gpuobj->pinst = gpuobj->im_pramin->start; + gpuobj->cinst = 0xdeadbeef; + gpuobj->vinst = gpuobj->im_backing_start; + } + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { int i; @@ -260,19 +277,16 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, /* card_type < NV_50) { *inst = gpuobj->im_pramin->start; + if (gpuobj->im_channel) { + cpramin = gpuobj->im_channel->ramin->gpuobj; + *inst += cpramin->im_pramin->start; + } return 0; } - if (chan && gpuobj->im_channel != chan) { - NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n", - gpuobj->im_channel->id, chan->id); - return -EINVAL; - } - /* NV50 channel-local instance */ if (chan) { - cpramin = chan->ramin->gpuobj; - *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; + *inst = gpuobj->im_pramin->start; return 0; } @@ -288,8 +302,7 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, } else { /* ...from local heap */ cpramin = gpuobj->im_channel->ramin->gpuobj; - *inst = cpramin->im_backing_start + - (gpuobj->im_pramin->start - cpramin->im_pramin->start); + *inst = cpramin->im_backing_start + gpuobj->im_pramin->start; return 0; } @@ -458,6 +471,10 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, gpuobj->im_backing_start = b_offset; } + gpuobj->pinst = gpuobj->im_pramin->start; + gpuobj->cinst = 0xdeadbeef; + gpuobj->vinst = gpuobj->im_backing_start; + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) nv_wo32(gpuobj, i, 0); @@ -789,7 +806,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) } pramin = chan->ramin->gpuobj; - ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); + ret = drm_mm_init(&chan->ramin_heap, base, size); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); nouveau_gpuobj_ref_del(dev, &chan->ramin); @@ -1124,13 +1141,11 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, u32 nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) { - struct drm_device *dev = gpuobj->dev; - return nv_ri32(dev, gpuobj->im_pramin->start + offset); + return nv_ri32(gpuobj->dev, gpuobj->pinst + offset); } void nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) { - struct drm_device *dev = gpuobj->dev; - nv_wi32(dev, gpuobj->im_pramin->start + offset, val); + nv_wi32(gpuobj->dev, gpuobj->pinst + offset, val); } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 435d2b727949..6f89674ebb96 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -113,8 +113,7 @@ 
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) return ret; } - ret = drm_mm_init(&chan->ramin_heap, - chan->ramin->gpuobj->im_pramin->start, 32768); + ret = drm_mm_init(&chan->ramin_heap, 0, 32768); if (ret) { NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); nv50_evo_channel_del(pchan); diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 38dbcda86196..9201f35d8277 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -228,19 +228,19 @@ nv50_fifo_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); if (dev_priv->chipset == 0x50) { - uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; - uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start; - - ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset, - 0x100, NVOBJ_FLAG_ZERO_ALLOC | + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst, + chan->ramin->gpuobj->vinst, 0x100, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &ramfc, &chan->ramfc); if (ret) return ret; - ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400, - ramin_voffset + 0x0400, 4096, - 0, NULL, &chan->cache); + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst + + 0x0400, + chan->ramin->gpuobj->vinst + + 0x0400, 4096, 0, NULL, + &chan->cache); if (ret) return ret; } else { From a8eaebc6c52bb0cd243b4cb421068f42d378be9c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:31 +1000 Subject: [PATCH 236/476] drm/nouveau: remove nouveau_gpuobj_ref completely, replace with sanity Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 12 +- drivers/gpu/drm/nouveau/nouveau_dma.c | 14 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 61 ++-- drivers/gpu/drm/nouveau/nouveau_irq.c | 16 +- drivers/gpu/drm/nouveau/nouveau_notifier.c | 7 +- drivers/gpu/drm/nouveau/nouveau_object.c | 358 ++++++--------------- drivers/gpu/drm/nouveau/nouveau_ramht.c | 137 ++++++-- drivers/gpu/drm/nouveau/nouveau_ramht.h | 26 +- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 13 +- drivers/gpu/drm/nouveau/nouveau_state.c | 18 +- drivers/gpu/drm/nouveau/nv04_fbcon.c | 9 +- drivers/gpu/drm/nouveau/nv04_fifo.c | 12 +- drivers/gpu/drm/nouveau/nv10_fifo.c | 6 +- drivers/gpu/drm/nouveau/nv20_graph.c | 40 ++- drivers/gpu/drm/nouveau/nv40_fifo.c | 9 +- drivers/gpu/drm/nouveau/nv40_graph.c | 16 +- drivers/gpu/drm/nouveau/nv50_display.c | 32 +- drivers/gpu/drm/nouveau/nv50_fbcon.c | 4 +- drivers/gpu/drm/nouveau/nv50_fifo.c | 85 +++-- drivers/gpu/drm/nouveau/nv50_graph.c | 39 ++- drivers/gpu/drm/nouveau/nv50_instmem.c | 126 ++++---- 21 files changed, 458 insertions(+), 582 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 3144ddea593e..e01396747f6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -70,14 +70,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; } - ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); - if (ret) { - NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret); - if (pushbuf != dev_priv->gart_info.sg_ctxdma) - nouveau_gpuobj_del(dev, &pushbuf); - return ret; - } - + nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); + nouveau_gpuobj_ref(NULL, &pushbuf); return 0; } @@ -308,7 +302,7 @@ nouveau_channel_free(struct 
nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); /* Release the channel's resources */ - nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_unpin(chan->pushbuf_bo); diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 2d006993378a..9d27acda87e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" void nouveau_dma_pre_init(struct nouveau_channel *chan) @@ -58,26 +59,27 @@ nouveau_dma_init(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *m2mf = NULL; - struct nouveau_gpuobj *nvsw = NULL; + struct nouveau_gpuobj *obj = NULL; int ret, i; /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? - 0x0039 : 0x5039, &m2mf); + 0x0039 : 0x5039, &obj); if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL); + ret = nouveau_ramht_insert(chan, NvM2MF, obj); + nouveau_gpuobj_ref(NULL, &obj); if (ret) return ret; /* Create an NV_SW object for various sync purposes */ - ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw); + ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL); + ret = nouveau_ramht_insert(chan, NvSw, obj); + nouveau_gpuobj_ref(NULL, &obj); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b8511c122f5f..bf89d0297e21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -133,7 +133,6 @@ enum nouveau_flags { #define NVOBJ_ENGINE_DISPLAY 2 #define NVOBJ_ENGINE_INT 0xdeadbeef -#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_FAKE (1 << 3) @@ -141,7 +140,6 @@ struct nouveau_gpuobj { struct drm_device *dev; struct list_head list; - struct nouveau_channel *im_channel; struct drm_mm_node *im_pramin; struct nouveau_bo *im_backing; uint32_t im_backing_start; @@ -162,16 +160,6 @@ struct nouveau_gpuobj { void *priv; }; -struct nouveau_gpuobj_ref { - struct list_head list; - - struct nouveau_gpuobj *gpuobj; - uint32_t instance; - - struct nouveau_channel *channel; - int handle; -}; - struct nouveau_channel { struct drm_device *dev; int id; @@ -197,33 +185,32 @@ struct nouveau_channel { } fence; /* DMA push buffer */ - struct nouveau_gpuobj_ref *pushbuf; - struct nouveau_bo *pushbuf_bo; - uint32_t pushbuf_base; + struct nouveau_gpuobj *pushbuf; + struct nouveau_bo *pushbuf_bo; + uint32_t pushbuf_base; /* Notifier memory */ struct nouveau_bo *notifier_bo; struct drm_mm notifier_heap; /* PFIFO context */ - struct nouveau_gpuobj_ref *ramfc; - struct nouveau_gpuobj_ref *cache; + struct nouveau_gpuobj *ramfc; + struct nouveau_gpuobj *cache; /* PGRAPH context */ /* XXX may be merge 2 pointers as private data ??? 
*/ - struct nouveau_gpuobj_ref *ramin_grctx; + struct nouveau_gpuobj *ramin_grctx; void *pgraph_ctx; /* NV50 VM */ - struct nouveau_gpuobj *vm_pd; - struct nouveau_gpuobj_ref *vm_gart_pt; - struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR]; + struct nouveau_gpuobj *vm_pd; + struct nouveau_gpuobj *vm_gart_pt; + struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; /* Objects */ - struct nouveau_gpuobj_ref *ramin; /* Private instmem */ - struct drm_mm ramin_heap; /* Private PRAMIN heap */ - struct nouveau_gpuobj_ref *ramht; /* Hash table */ - struct list_head ramht_refs; /* Objects referenced by RAMHT */ + struct nouveau_gpuobj *ramin; /* Private instmem */ + struct drm_mm ramin_heap; /* Private PRAMIN heap */ + struct nouveau_ramht *ramht; /* Hash table */ /* GPU object info for stuff used in-kernel (mm_enabled) */ uint32_t m2mf_ntfy; @@ -301,7 +288,7 @@ struct nouveau_fb_engine { struct nouveau_fifo_engine { int channels; - struct nouveau_gpuobj_ref *playlist[2]; + struct nouveau_gpuobj *playlist[2]; int cur_playlist; int (*init)(struct drm_device *); @@ -339,7 +326,7 @@ struct nouveau_pgraph_engine { int grctx_size; /* NV2x/NV3x context table (0x400780) */ - struct nouveau_gpuobj_ref *ctx_table; + struct nouveau_gpuobj *ctx_table; int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); @@ -555,7 +542,7 @@ struct drm_nouveau_private { spinlock_t context_switch_lock; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ - struct nouveau_gpuobj *ramht; + struct nouveau_ramht *ramht; uint32_t ramin_rsvd_vram; uint32_t ramht_offset; uint32_t ramht_size; @@ -764,24 +751,12 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, uint32_t size, int align, uint32_t flags, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, - uint32_t handle, struct nouveau_gpuobj *, - struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_ref_del(struct drm_device *, - struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, - struct nouveau_gpuobj_ref **ref_ret); -extern int nouveau_gpuobj_new_ref(struct drm_device *, - struct nouveau_channel *alloc_chan, - struct nouveau_channel *ref_chan, - uint32_t handle, uint32_t size, int align, - uint32_t flags, struct nouveau_gpuobj_ref **); +extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *, + struct nouveau_gpuobj **); extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t p_offset, uint32_t b_offset, uint32_t size, uint32_t flags, - struct nouveau_gpuobj **, - struct nouveau_gpuobj_ref**); + struct nouveau_gpuobj **); extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 794b0ee30cf6..9cc3259a54b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -35,6 +35,7 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_reg.h" +#include "nouveau_ramht.h" #include /* needed for hotplug irq */ @@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) const int mthd = addr & 0x1ffc; if (mthd == 0x0000) { - struct nouveau_gpuobj_ref *ref = NULL; + struct 
nouveau_gpuobj *gpuobj; - if (nouveau_gpuobj_ref_find(chan, data, &ref)) + gpuobj = nouveau_ramht_find(chan, data); + if (!gpuobj) return false; - if (ref->gpuobj->engine != NVOBJ_ENGINE_SW) + if (gpuobj->engine != NVOBJ_ENGINE_SW) return false; - chan->sw_subchannel[subc] = ref->gpuobj->class; + chan->sw_subchannel[subc] = gpuobj->class; nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); return true; @@ -357,7 +359,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) if (!chan || !chan->ramin_grctx) continue; - if (inst == chan->ramin_grctx->instance) + if (inst == chan->ramin_grctx->pinst) break; } } else { @@ -369,7 +371,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) if (!chan || !chan->ramin) continue; - if (inst == chan->ramin->instance) + if (inst == chan->ramin->vinst) break; } } @@ -625,7 +627,7 @@ nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) if (!chan || !chan->ramin) continue; - if (trap[1] == chan->ramin->instance >> 12) + if (trap[1] == chan->ramin->vinst >> 12) break; } NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50ce..22b86189b7bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" int nouveau_notifier_init_channel(struct nouveau_channel *chan) @@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, nobj->dtor = nouveau_notifier_gpuobj_dtor; nobj->priv = mem; - ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); + ret = nouveau_ramht_insert(chan, handle, nobj); + nouveau_gpuobj_ref(NULL, &nobj); if (ret) { - nouveau_gpuobj_del(dev, &nobj); drm_mm_put_block(mem); - NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 552f5131650f..d55c50f1a2d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -90,7 +90,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, NV_DEBUG(dev, "gpuobj %p\n", gpuobj); gpuobj->dev = dev; gpuobj->flags = flags; - gpuobj->im_channel = chan; + gpuobj->refcount = 1; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); @@ -108,7 +108,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, ret = engine->instmem.populate(dev, gpuobj, &size); if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } } @@ -119,14 +119,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } if (!chan) { ret = engine->instmem.bind(dev, gpuobj); if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } } @@ -134,13 +134,13 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, /* calculate the various different addresses for the object */ if (chan) { gpuobj->pinst = gpuobj->im_pramin->start + - chan->ramin->gpuobj->im_pramin->start; + 
chan->ramin->im_pramin->start; if (dev_priv->card_type < NV_50) { gpuobj->cinst = gpuobj->pinst; } else { gpuobj->cinst = gpuobj->im_pramin->start; gpuobj->vinst = gpuobj->im_pramin->start + - chan->ramin->gpuobj->im_backing_start; + chan->ramin->im_backing_start; } } else { gpuobj->pinst = gpuobj->im_pramin->start; @@ -156,6 +156,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, engine->instmem.flush(dev); } + *gpuobj_ret = gpuobj; return 0; } @@ -176,20 +177,23 @@ int nouveau_gpuobj_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; int ret; NV_DEBUG(dev, "\n"); - if (dev_priv->card_type < NV_50) { - ret = nouveau_gpuobj_new_fake(dev, - dev_priv->ramht_offset, ~0, dev_priv->ramht_size, - NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, - &dev_priv->ramht, NULL); - if (ret) - return ret; - } + if (dev_priv->card_type >= NV_50) + return 0; - return 0; + ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, + dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); + if (ret) + return ret; + + ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + return ret; } void @@ -199,7 +203,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev) NV_DEBUG(dev, "\n"); - nouveau_gpuobj_del(dev, &dev_priv->ramht); + nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); } void @@ -216,29 +220,21 @@ nouveau_gpuobj_late_takedown(struct drm_device *dev) NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", gpuobj, gpuobj->refcount); - gpuobj->refcount = 0; - nouveau_gpuobj_del(dev, &gpuobj); + + gpuobj->refcount = 1; + nouveau_gpuobj_ref(NULL, &gpuobj); } } -int -nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) +static int +nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) { + struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; - struct nouveau_gpuobj *gpuobj; int i; - NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); - - if (!dev_priv || !pgpuobj || !(*pgpuobj)) - return -EINVAL; - gpuobj = *pgpuobj; - - if (gpuobj->refcount != 0) { - NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); - return -EINVAL; - } + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) @@ -261,181 +257,26 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) list_del(&gpuobj->list); - *pgpuobj = NULL; kfree(gpuobj); return 0; } -static int -nouveau_gpuobj_instance_get(struct drm_device *dev, - struct nouveau_channel *chan, - struct nouveau_gpuobj *gpuobj, uint32_t *inst) +void +nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *cpramin; + if (ref) + ref->refcount++; - /* card_type < NV_50) { - *inst = gpuobj->im_pramin->start; - if (gpuobj->im_channel) { - cpramin = gpuobj->im_channel->ramin->gpuobj; - *inst += cpramin->im_pramin->start; - } - return 0; - } + if (*ptr && --(*ptr)->refcount == 0) + nouveau_gpuobj_del(*ptr); - /* NV50 channel-local instance */ - if (chan) { - *inst = gpuobj->im_pramin->start; - return 0; - } - - /* NV50 global (VRAM) instance */ - if (!gpuobj->im_channel) { - /* ...from global heap */ - if (!gpuobj->im_backing) { - NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); - return -EINVAL; - } - *inst = gpuobj->im_backing_start; - return 0; - } else { - /* ...from local heap */ - cpramin = gpuobj->im_channel->ramin->gpuobj; - *inst = cpramin->im_backing_start + gpuobj->im_pramin->start; - return 0; - } - - return -EINVAL; -} - -int -nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, - uint32_t handle, struct nouveau_gpuobj *gpuobj, - struct nouveau_gpuobj_ref **ref_ret) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj_ref *ref; - uint32_t instance; - int ret; - - NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", - chan ? chan->id : -1, handle, gpuobj); - - if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) - return -EINVAL; - - if (!chan && !ref_ret) - return -EINVAL; - - if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { - /* sw object */ - instance = 0x40; - } else { - ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); - if (ret) - return ret; - } - - ref = kzalloc(sizeof(*ref), GFP_KERNEL); - if (!ref) - return -ENOMEM; - INIT_LIST_HEAD(&ref->list); - ref->gpuobj = gpuobj; - ref->channel = chan; - ref->instance = instance; - - if (!ref_ret) { - ref->handle = handle; - - ret = nouveau_ramht_insert(dev, ref); - if (ret) { - kfree(ref); - return ret; - } - } else { - ref->handle = ~0; - *ref_ret = ref; - } - - ref->gpuobj->refcount++; - return 0; -} - -int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) -{ - struct nouveau_gpuobj_ref *ref; - - NV_DEBUG(dev, "ref %p\n", pref ? 
*pref : NULL); - - if (!dev || !pref || *pref == NULL) - return -EINVAL; - ref = *pref; - - if (ref->handle != ~0) - nouveau_ramht_remove(dev, ref); - - if (ref->gpuobj) { - ref->gpuobj->refcount--; - - if (ref->gpuobj->refcount == 0) { - if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) - nouveau_gpuobj_del(dev, &ref->gpuobj); - } - } - - *pref = NULL; - kfree(ref); - return 0; -} - -int -nouveau_gpuobj_new_ref(struct drm_device *dev, - struct nouveau_channel *oc, struct nouveau_channel *rc, - uint32_t handle, uint32_t size, int align, - uint32_t flags, struct nouveau_gpuobj_ref **ref) -{ - struct nouveau_gpuobj *gpuobj = NULL; - int ret; - - ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); - if (ret) - return ret; - - ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); - if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); - return ret; - } - - return 0; -} - -int -nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, - struct nouveau_gpuobj_ref **ref_ret) -{ - struct nouveau_gpuobj_ref *ref; - struct list_head *entry, *tmp; - - list_for_each_safe(entry, tmp, &chan->ramht_refs) { - ref = list_entry(entry, struct nouveau_gpuobj_ref, list); - - if (ref->handle == handle) { - if (ref_ret) - *ref_ret = ref; - return 0; - } - } - - return -EINVAL; + *ptr = ref; } int nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, uint32_t b_offset, uint32_t size, - uint32_t flags, struct nouveau_gpuobj **pgpuobj, - struct nouveau_gpuobj_ref **pref) + uint32_t flags, struct nouveau_gpuobj **pgpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = NULL; @@ -450,8 +291,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); gpuobj->dev = dev; - gpuobj->im_channel = NULL; gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + gpuobj->refcount = 1; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); @@ -459,7 +300,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), GFP_KERNEL); if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } gpuobj->im_pramin->start = p_offset; @@ -481,14 +322,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, dev_priv->engine.instmem.flush(dev); } - if (pref) { - i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); - if (i) { - nouveau_gpuobj_del(dev, &gpuobj); - return i; - } - } - if (pgpuobj) *pgpuobj = gpuobj; return 0; @@ -628,7 +461,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, *o_ret = 0; } else if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { - *gpuobj = dev_priv->gart_info.sg_ctxdma; + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); if (offset & ~0xffffffffULL) { NV_ERROR(dev, "obj offset exceeds 32-bits\n"); return -EINVAL; @@ -760,8 +593,11 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; + gpuobj->dev = chan->dev; gpuobj->engine = NVOBJ_ENGINE_SW; gpuobj->class = class; + gpuobj->refcount = 1; + gpuobj->cinst = 0x40; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); *gpuobj_ret = gpuobj; @@ -773,7 +609,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pramin = NULL; uint32_t size; uint32_t base; 
int ret; @@ -798,18 +633,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) size += 0x1000; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, - &chan->ramin); + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); if (ret) { NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); return ret; } - pramin = chan->ramin->gpuobj; ret = drm_mm_init(&chan->ramin_heap, base, size); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); - nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_gpuobj_ref(NULL, &chan->ramin); return ret; } @@ -826,8 +659,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret, i; - INIT_LIST_HEAD(&chan->ramht_refs); - NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); /* Allocate a chunk of memory for per-channel object storage */ @@ -846,10 +677,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, uint32_t vm_offset, pde; vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; - vm_offset += chan->ramin->gpuobj->im_pramin->start; + vm_offset += chan->ramin->im_pramin->start; ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, - 0, &chan->vm_pd, NULL); + 0, &chan->vm_pd); if (ret) return ret; for (i = 0; i < 0x4000; i += 8) { @@ -857,25 +688,19 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); } + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt); pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->gart_info.sg_ctxdma, - &chan->vm_gart_pt); - if (ret) - return ret; - nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3); + nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); nv_wo32(chan->vm_pd, pde + 4, 0x00000000); pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->vm_vram_pt[i], - &chan->vm_vram_pt[i]); - if (ret) - return ret; + nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], + &chan->vm_vram_pt[i]); nv_wo32(chan->vm_pd, pde + 0, - chan->vm_vram_pt[i]->instance | 0x61); + chan->vm_vram_pt[i]->vinst | 0x61); nv_wo32(chan->vm_pd, pde + 4, 0x00000000); pde += 8; } @@ -885,15 +710,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* RAMHT */ if (dev_priv->card_type < NV_50) { - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, - &chan->ramht); + nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); + } else { + struct nouveau_gpuobj *ramht = NULL; + + ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); if (ret) return ret; - } else { - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, - 0x8000, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramht); + + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); if (ret) return ret; } @@ -910,24 +737,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, } } else { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_VIDMEM, &vram); + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; } } - ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); + ret = nouveau_ramht_insert(chan, vram_h, vram); + nouveau_gpuobj_ref(NULL, 
&vram); if (ret) { - NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); return ret; } /* TT memory ctxdma */ if (dev_priv->card_type >= NV_50) { - tt = vram; + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->vm_end, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP, &tt); + if (ret) { + NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); + return ret; + } } else if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ret = nouveau_gpuobj_gart_dma_new(chan, 0, @@ -943,9 +778,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; } - ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); + ret = nouveau_ramht_insert(chan, tt_h, tt); + nouveau_gpuobj_ref(NULL, &tt); if (ret) { - NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); return ret; } @@ -957,33 +793,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - struct list_head *entry, *tmp; - struct nouveau_gpuobj_ref *ref; int i; NV_DEBUG(dev, "ch%d\n", chan->id); - if (!chan->ramht_refs.next) + if (!chan->ramht) return; - list_for_each_safe(entry, tmp, &chan->ramht_refs) { - ref = list_entry(entry, struct nouveau_gpuobj_ref, list); + nouveau_ramht_ref(NULL, &chan->ramht, chan); - nouveau_gpuobj_ref_del(dev, &ref); - } - - nouveau_gpuobj_ref_del(dev, &chan->ramht); - - nouveau_gpuobj_del(dev, &chan->vm_pd); - nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); - if (chan->ramin) - nouveau_gpuobj_ref_del(dev, &chan->ramin); - + nouveau_gpuobj_ref(NULL, &chan->ramin); } int @@ -1095,25 +921,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, return -EPERM; } - if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) + if (nouveau_ramht_find(chan, init->handle)) return -EEXIST; if (!grc->software) ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); else ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); - if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); return ret; } - ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); + ret = nouveau_ramht_insert(chan, init->handle, gr); + nouveau_gpuobj_ref(NULL, &gr); if (ret) { NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - nouveau_gpuobj_del(dev, &gr); return ret; } @@ -1124,17 +949,16 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_gpuobj_ref *ref; + struct nouveau_gpuobj *gpuobj; struct nouveau_channel *chan; - int ret; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); - ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); - if (ret) - return ret; - nouveau_gpuobj_ref_del(dev, &ref); + gpuobj = nouveau_ramht_find(chan, objfree->handle); + if (!gpuobj) + return -ENOENT; + nouveau_ramht_remove(chan, objfree->handle); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 
e5cc93c55d80..5f9d52f06305 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -62,48 +62,56 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, } int -nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, + struct nouveau_gpuobj *gpuobj) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; + struct nouveau_ramht_entry *entry; + struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; uint32_t ctx, co, ho; - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return -EINVAL; - } + if (nouveau_ramht_find(chan, handle)) + return -EEXIST; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + entry->channel = chan; + entry->gpuobj = NULL; + entry->handle = handle; + list_add(&entry->head, &chan->ramht->entries); + nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | + ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); } else if (dev_priv->card_type < NV_50) { - ctx = (ref->instance >> 4) | + ctx = (gpuobj->cinst >> 4) | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } else { - if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { - ctx = (ref->instance << 10) | 2; + if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { + ctx = (gpuobj->cinst << 10) | 2; } else { - ctx = (ref->instance >> 4) | - ((ref->gpuobj->engine << + ctx = (gpuobj->cinst >> 4) | + ((gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); } } - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { NV_DEBUG(dev, "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, ctx); - nv_wo32(ramht, co + 0, ref->handle); + chan->id, co, handle, ctx); + nv_wo32(ramht, co + 0, handle); nv_wo32(ramht, co + 4, ctx); - list_add_tail(&ref->list, &chan->ramht_refs); instmem->flush(dev); return 0; } @@ -116,35 +124,40 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) } while (co != ho); NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); + list_del(&entry->head); + kfree(entry); return -ENOMEM; } void -nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; - uint32_t co, ho; + struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; + struct nouveau_ramht_entry *entry, *tmp; + u32 co, ho; - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return; + list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { + if (entry->channel != chan || entry->handle != handle) + continue; + + nouveau_gpuobj_ref(NULL, &entry->gpuobj); + list_del(&entry->head); + kfree(entry); + break; } - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && - (ref->handle == nv_ro32(ramht, co))) { + (handle == nv_ro32(ramht, co))) { NV_DEBUG(dev, "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, - nv_ro32(ramht, co + 4)); + chan->id, co, handle, nv_ro32(ramht, co + 4)); nv_wo32(ramht, co + 0, 0x00000000); nv_wo32(ramht, co + 4, 0x00000000); - - list_del(&ref->list); instmem->flush(dev); return; } @@ -153,8 +166,64 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); - list_del(&ref->list); NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", - chan->id, ref->handle); + chan->id, handle); +} + +struct nouveau_gpuobj * +nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) +{ + struct nouveau_ramht_entry *entry; + + list_for_each_entry(entry, &chan->ramht->entries, head) { + if (entry->channel == chan && entry->handle == handle) + return entry->gpuobj; + } + + return NULL; +} + +int +nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + struct nouveau_ramht **pramht) +{ + struct nouveau_ramht *ramht; + + ramht = kzalloc(sizeof(*ramht), GFP_KERNEL); + if (!ramht) + return -ENOMEM; + + ramht->dev = dev; + ramht->refcount = 1; + INIT_LIST_HEAD(&ramht->entries); + nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); + + *pramht = ramht; + return 0; +} + +void +nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, + struct nouveau_channel *chan) +{ + struct nouveau_ramht_entry *entry, *tmp; + struct nouveau_ramht *ramht; + + if (ref) + ref->refcount++; + + ramht = *ptr; + if (ramht) { + list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { + if (entry->channel == chan) + nouveau_ramht_remove(chan, entry->handle); + } + + if (--ramht->refcount == 0) { + nouveau_gpuobj_ref(NULL, &ramht->gpuobj); + kfree(ramht); + } + } + *ptr = ref; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index e10455c6e7ff..7076ae4c07a5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -25,7 +25,29 @@ #ifndef __NOUVEAU_RAMHT_H__ #define __NOUVEAU_RAMHT_H__ -extern int nouveau_ramht_insert(struct drm_device *, struct nouveau_gpuobj_ref *); -extern void nouveau_ramht_remove(struct drm_device *, struct nouveau_gpuobj_ref *); +struct nouveau_ramht_entry { + struct list_head head; + struct nouveau_channel *channel; + struct nouveau_gpuobj *gpuobj; + u32 handle; +}; + +struct nouveau_ramht { + struct drm_device *dev; + int refcount; + struct nouveau_gpuobj *gpuobj; + struct list_head entries; +}; + +extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *, + struct nouveau_ramht **); +extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, + struct nouveau_channel *unref_channel); + +extern int nouveau_ramht_insert(struct nouveau_channel *, u32 
handle, + struct nouveau_gpuobj *); +extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); +extern struct nouveau_gpuobj * +nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 630988af801c..5a66a7ae6e29 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -234,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev) } ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, - NVOBJ_FLAG_ALLOW_NO_REFS | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &gpuobj); if (ret) { @@ -245,7 +244,7 @@ nouveau_sgdma_init(struct drm_device *dev) dev_priv->gart_info.sg_dummy_page = alloc_page(GFP_KERNEL|__GFP_DMA32); if (!dev_priv->gart_info.sg_dummy_page) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } @@ -254,11 +253,17 @@ nouveau_sgdma_init(struct drm_device *dev) pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -EFAULT; } if (dev_priv->card_type < NV_50) { + /* special case, allocated from global instmem heap so + * cinst is invalid, we use it on all channels though so + * cinst needs to be valid, set it the same as pinst + */ + gpuobj->cinst = gpuobj->pinst; + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE * on those cards? */ @@ -302,7 +307,7 @@ nouveau_sgdma_takedown(struct drm_device *dev) dev_priv->gart_info.sg_dummy_bus = 0; } - nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); + nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 989322be3728..fec29522298d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -35,6 +35,7 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_fbcon.h" +#include "nouveau_ramht.h" #include "nv50_display.h" static void nouveau_stub_takedown(struct drm_device *dev) {} @@ -437,16 +438,14 @@ static int nouveau_card_init_channel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; + struct nouveau_gpuobj *gpuobj = NULL; int ret; ret = nouveau_channel_alloc(dev, &dev_priv->channel, - (struct drm_file *)-2, - NvDmaFB, NvDmaTT); + (struct drm_file *)-2, NvDmaFB, NvDmaTT); if (ret) return ret; - gpuobj = NULL; ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, @@ -454,26 +453,25 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, - gpuobj, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; - gpuobj = NULL; ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RW, &gpuobj, NULL); if (ret) goto out_err; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, - gpuobj, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; return 0; + out_err: - 
nouveau_gpuobj_del(dev, &gpuobj); nouveau_channel_free(dev_priv->channel); dev_priv->channel = NULL; return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 1eeac4fae73d..33e4c9388bc1 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -25,6 +25,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" #include "nouveau_fbcon.h" void @@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); - if (ret) - return ret; - - return 0; + ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); + nouveau_gpuobj_ref(NULL, &obj); + return ret; } int diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index bbb87ef262c0..b7ecafb78d77 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -38,10 +38,8 @@ #define NV04_RAMFC_ENGINE 0x14 #define NV04_RAMFC_PULL1_ENGINE 0x18 -#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset, (val)) -#define RAMFC_RD(offset) nv_ro32(chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset) +#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) void nv04_fifo_disable(struct drm_device *dev) @@ -130,7 +128,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) NV04_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, - NULL, &chan->ramfc); + &chan->ramfc); if (ret) return ret; @@ -139,7 +137,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) /* Setup initial state */ RAMFC_WR(DMA_PUT, chan->pushbuf_base); RAMFC_WR(DMA_GET, chan->pushbuf_base); - RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); + RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4); RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -161,7 +159,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 7a4069cf5d0b..ccb07fb701ca 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -48,7 +48,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; @@ -57,7 +57,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) */ nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -80,7 +80,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void diff --git 
a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index d8693d32bd0e..12ab9cd56eca 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan) BUG_ON(1); } - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 16, NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); if (ret) return ret; /* Initialise default context values */ - ctx_init(dev, chan->ramin_grctx->gpuobj); + ctx_init(dev, chan->ramin_grctx); /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - nv_wo32(chan->ramin_grctx->gpuobj, idoffs, + nv_wo32(chan->ramin_grctx, idoffs, (chan->id << 24) | 0x1); /* CTX_USER */ - nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, - chan->ramin_grctx->instance >> 4); + nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4); return 0; } @@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - if (chan->ramin_grctx) - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - - nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, 0); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + nv_wo32(pgraph->ctx_table, chan->id * 4, 0); } int @@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan) if (!chan->ramin_grctx) return -EINVAL; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, @@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev) chan = pgraph->channel(dev); if (!chan) return 0; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, @@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev) if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &pgraph->ctx_table); + ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - pgraph->ctx_table->instance >> 4); + pgraph->ctx_table->pinst >> 4); nv20_graph_rdi(dev); @@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); } int @@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev) if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &pgraph->ctx_table); + ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - pgraph->ctx_table->instance >> 4); + pgraph->ctx_table->pinst >> 4); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 2b67f1835c39..03f4dc13725b 100644 --- 
a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -42,7 +42,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; @@ -50,7 +50,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -58,7 +58,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) NV_PFIFO_CACHE1_BIG_ENDIAN | #endif 0x30000000 /* no idea.. */); - nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); + nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4); nv_wi32(dev, fc + 60, 0x0001FFFF); /* enable the fifo dma operation */ @@ -77,8 +77,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - if (chan->ramfc) - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 6215dfcf1ea9..912940e2457d 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev) struct nouveau_channel *chan = dev_priv->fifos[i]; if (chan && chan->ramin_grctx && - chan->ramin_grctx->instance == inst) + chan->ramin_grctx->pinst == inst) return chan; } @@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan) struct nouveau_grctx ctx = {}; int ret; - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 16, NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); if (ret) return ret; /* Initialise default context values */ ctx.dev = chan->dev; ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = chan->ramin_grctx->gpuobj; + ctx.data = chan->ramin_grctx; nv40_grctx_init(&ctx); - nv_wo32(chan->ramin_grctx->gpuobj, 0, - chan->ramin_grctx->gpuobj->im_pramin->start); + nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->im_pramin->start); return 0; } void nv40_graph_destroy_context(struct nouveau_channel *chan) { - nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } static int @@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan) if (!chan->ramin_grctx) return -EINVAL; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; ret = nv40_graph_transfer_context(dev, inst, 0); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 6f89674ebb96..b65d2ddd415d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -30,6 +30,7 @@ #include "nouveau_connector.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_ramht.h" #include "drm_crtc_helper.h" static void @@ -66,12 +67,6 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, return ret; obj->engine = NVOBJ_ENGINE_DISPLAY; - ret = 
nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL); - if (ret) { - nouveau_gpuobj_del(dev, &obj); - return ret; - } - nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); nv_wo32(obj, 4, limit); nv_wo32(obj, 8, offset); @@ -83,6 +78,12 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, nv_wo32(obj, 20, 0x00020000); dev_priv->engine.instmem.flush(dev); + ret = nouveau_ramht_insert(evo, name, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) { + return ret; + } + return 0; } @@ -90,6 +91,7 @@ static int nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; struct nouveau_channel *chan; int ret; @@ -103,10 +105,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) chan->user_get = 4; chan->user_put = 0; - INIT_LIST_HEAD(&chan->ramht_refs); - - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); + ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); if (ret) { NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); nv50_evo_channel_del(pchan); @@ -120,14 +120,20 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) return ret; } - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16, - 0, &chan->ramht); + ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); if (ret) { NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); nv50_evo_channel_del(pchan); return ret; } + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + if (dev_priv->chipset != 0x50) { ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, 0, 0xffffffff); @@ -321,7 +327,7 @@ nv50_display_init(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9); + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6bf025c6fc6f..6dcf048eddbc 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -1,6 +1,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" #include "nouveau_fbcon.h" void @@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); + nouveau_gpuobj_ref(NULL, &eng2d); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 9201f35d8277..4fc8b59cc41e 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -27,13 +27,14 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" static void nv50_fifo_playlist_update(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - struct nouveau_gpuobj_ref *cur; + struct nouveau_gpuobj *cur; int i, nr; NV_DEBUG(dev, "\n"); @@ -44,13 +45,13 @@ nv50_fifo_playlist_update(struct drm_device *dev) /* We never schedule channel 0 or 127 */ for (i = 1, nr = 0; i < 127; i++) { if 
(dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { - nv_wo32(cur->gpuobj, (nr * 4), i); + nv_wo32(cur, (nr * 4), i); nr++; } } dev_priv->engine.instmem.flush(dev); - nv_wr32(dev, 0x32f4, cur->instance >> 12); + nv_wr32(dev, 0x32f4, cur->vinst >> 12); nv_wr32(dev, 0x32ec, nr); nv_wr32(dev, 0x2500, 0x101); } @@ -65,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel) NV_DEBUG(dev, "ch%d\n", channel); if (dev_priv->chipset == 0x50) - inst = chan->ramfc->instance >> 12; + inst = chan->ramfc->vinst >> 12; else - inst = chan->ramfc->instance >> 8; + inst = chan->ramfc->vinst >> 8; nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); @@ -165,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev) goto just_reset; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, - &pfifo->playlist[0]); + ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[0]); if (ret) { NV_ERROR(dev, "error creating playlist 0: %d\n", ret); return ret; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, - &pfifo->playlist[1]); + ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[1]); if (ret) { - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); NV_ERROR(dev, "error creating playlist 1: %d\n", ret); return ret; } @@ -205,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev) if (!pfifo->playlist[0]) return; - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); } int @@ -228,42 +229,39 @@ nv50_fifo_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); if (dev_priv->chipset == 0x50) { - ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst, - chan->ramin->gpuobj->vinst, 0x100, + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, + chan->ramin->vinst, 0x100, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &ramfc, + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; - ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst + - 0x0400, - chan->ramin->gpuobj->vinst + - 0x0400, 4096, 0, NULL, - &chan->cache); + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400, + chan->ramin->vinst + 0x0400, + 4096, 0, &chan->cache); if (ret) return ret; } else { - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, - &chan->ramfc); + ret = nouveau_gpuobj_new(dev, chan, 0x100, 256, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; - ramfc = chan->ramfc->gpuobj; - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024, - 0, &chan->cache); + ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, + 0, &chan->cache); if (ret) return ret; } + ramfc = chan->ramfc; spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - nv_wo32(ramfc, 0x48, chan->pushbuf->instance >> 4); + nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ | (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->instance >> 4)); + (chan->ramht->gpuobj->cinst >> 4)); nv_wo32(ramfc, 0x44, 0x2101ffff); nv_wo32(ramfc, 0x60, 0x7fffffff); nv_wo32(ramfc, 0x40, 0x00000000); @@ -274,11 +272,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan) nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max 
+ 1) << 16); if (dev_priv->chipset != 0x50) { - nv_wo32(chan->ramin->gpuobj, 0, chan->id); - nv_wo32(chan->ramin->gpuobj, 4, chan->ramfc->instance >> 8); + nv_wo32(chan->ramin, 0, chan->id); + nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8); - nv_wo32(ramfc, 0x88, chan->cache->instance >> 10); - nv_wo32(ramfc, 0x98, chan->ramin->instance >> 12); + nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10); + nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12); } dev_priv->engine.instmem.flush(dev); @@ -293,12 +291,13 @@ void nv50_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; - struct nouveau_gpuobj_ref *ramfc = chan->ramfc; + struct nouveau_gpuobj *ramfc = NULL; NV_DEBUG(dev, "ch%d\n", chan->id); /* This will ensure the channel is seen as disabled. */ - chan->ramfc = NULL; + nouveau_gpuobj_ref(chan->ramfc, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); nv50_fifo_channel_disable(dev, chan->id); /* Dummy channel, also used on ch 127 */ @@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) nv50_fifo_channel_disable(dev, 127); nv50_fifo_playlist_update(dev); - nouveau_gpuobj_ref_del(dev, &ramfc); - nouveau_gpuobj_ref_del(dev, &chan->cache); + nouveau_gpuobj_ref(NULL, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->cache); } int @@ -315,8 +314,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; - struct nouveau_gpuobj *cache = chan->cache->gpuobj; + struct nouveau_gpuobj *ramfc = chan->ramfc; + struct nouveau_gpuobj *cache = chan->cache; int ptr, cnt; NV_DEBUG(dev, "ch%d\n", chan->id); @@ -399,8 +398,8 @@ nv50_fifo_unload_context(struct drm_device *dev) return -EINVAL; } NV_DEBUG(dev, "ch%d\n", chan->id); - ramfc = chan->ramfc->gpuobj; - cache = chan->cache->gpuobj; + ramfc = chan->ramfc; + cache = chan->cache; nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330)); nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334)); diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 17a8d788a494..7db0d627c1b9 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -27,7 +27,7 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" - +#include "nouveau_ramht.h" #include "nouveau_grctx.h" static void @@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev) for (i = 0; i < dev_priv->engine.fifo.channels; i++) { struct nouveau_channel *chan = dev_priv->fifos[i]; - if (chan && chan->ramin && chan->ramin->instance == inst) + if (chan && chan->ramin && chan->ramin->vinst == inst) return chan; } @@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; - struct nouveau_gpuobj *obj; + struct nouveau_gpuobj *ramin = chan->ramin; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_grctx ctx = {}; int hdr, ret; NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) return ret; - obj = chan->ramin_grctx->gpuobj; hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; nv_wo32(ramin, hdr + 0x00, 0x00190002); - nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->instance + + nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + pgraph->grctx_size - 1); - nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->instance); + nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst); nv_wo32(ramin, hdr + 0x0c, 0); nv_wo32(ramin, hdr + 0x10, 0); nv_wo32(ramin, hdr + 0x14, 0x00010000); ctx.dev = chan->dev; ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = obj; + ctx.data = chan->ramin_grctx; nv50_grctx_init(&ctx); - nv_wo32(obj, 0x00000, chan->ramin->instance >> 12); + nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); dev_priv->engine.instmem.flush(dev); return 0; @@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - if (!chan->ramin || !chan->ramin->gpuobj) + if (!chan->ramin) return; for (i = hdr; i < hdr + 24; i += 4) - nv_wo32(chan->ramin->gpuobj, i, 0); + nv_wo32(chan->ramin, i, 0); dev_priv->engine.instmem.flush(dev); - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } static int @@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) int nv50_graph_load_context(struct nouveau_channel *chan) { - uint32_t inst = chan->ramin->instance >> 12; + uint32_t inst = chan->ramin->vinst >> 12; NV_DEBUG(chan->dev, "ch%d\n", chan->id); return nv50_graph_do_load_context(chan->dev, inst); @@ -327,15 +325,16 @@ static int nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, int mthd, uint32_t data) { - struct nouveau_gpuobj_ref *ref = NULL; + struct nouveau_gpuobj *gpuobj; - if (nouveau_gpuobj_ref_find(chan, data, &ref)) + gpuobj = nouveau_ramht_find(chan, data); + if (!gpuobj) return -ENOENT; - if (nouveau_notifier_offset(ref->gpuobj, NULL)) + if (nouveau_notifier_offset(gpuobj, NULL)) return -EINVAL; - chan->nvsw.vblsem = ref->gpuobj; + chan->nvsw.vblsem = gpuobj; chan->nvsw.vblsem_offset = ~0; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 821806c835ba..0af0baf4f1a9 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -32,9 +32,9 @@ struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ - struct nouveau_gpuobj_ref *pramin_pt; - struct nouveau_gpuobj_ref *pramin_bar; - struct nouveau_gpuobj_ref *fb_bar; + struct nouveau_gpuobj *pramin_pt; + struct nouveau_gpuobj *pramin_bar; + struct nouveau_gpuobj *fb_bar; }; #define NV50_INSTMEM_PAGE_SHIFT 12 @@ -44,15 +44,8 @@ struct nv50_instmem_priv { /*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN */ #define BAR0_WI32(g, o, v) do { \ - uint32_t offset; \ - if ((g)->im_backing) { \ - offset = (g)->im_backing_start; \ - } else { \ - offset = chan->ramin->gpuobj->im_backing_start; \ - offset += (g)->im_pramin->start; \ - } \ - offset += (o); \ - nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ + u32 offset = (g)->vinst + (o); \ + nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ } while (0) int @@ -142,8 +135,7 @@ nv50_instmem_init(struct drm_device *dev) INIT_LIST_HEAD(&chan->ramht_refs); /* Channel's PRAMIN object + heap */ - ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, - NULL, &chan->ramin); + ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, &chan->ramin); if (ret) return ret; @@ -152,16 +144,16 @@ nv50_instmem_init(struct drm_device *dev) /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ ret 
= nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, - 0x4000, 0, NULL, &chan->ramfc); + 0x4000, 0, &chan->ramfc); if (ret) return ret; for (i = 0; i < c_vmpd; i += 4) - BAR0_WI32(chan->ramin->gpuobj, i, 0); + BAR0_WI32(chan->ramin, i, 0); /* VM page directory */ ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, - 0x4000, 0, &chan->vm_pd, NULL); + 0x4000, 0, &chan->vm_pd); if (ret) return ret; for (i = 0; i < 0x4000; i += 8) { @@ -172,8 +164,8 @@ nv50_instmem_init(struct drm_device *dev) /* PRAMIN page table, cheat and map into VM at 0x0000000000. * We map the entire fake channel into the start of the PRAMIN BAR */ - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, - 0, &priv->pramin_pt); + ret = nouveau_gpuobj_new(dev, chan, pt_size, 0x1000, 0, + &priv->pramin_pt); if (ret) return ret; @@ -185,76 +177,74 @@ nv50_instmem_init(struct drm_device *dev) i = 0; while (v < dev_priv->vram_sys_base + c_offset + c_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); + BAR0_WI32(priv->pramin_pt, i + 0, lower_32_bits(v)); + BAR0_WI32(priv->pramin_pt, i + 4, upper_32_bits(v)); v += 0x1000; i += 8; } while (i < pt_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + BAR0_WI32(priv->pramin_pt, i + 0, 0x00000000); + BAR0_WI32(priv->pramin_pt, i + 4, 0x00000000); i += 8; } - BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); + BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->vinst | 0x63); BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); /* VRAM page table(s), mapped into VM at +1GiB */ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, - NV50_VM_BLOCK/65536*8, 0, 0, - &chan->vm_vram_pt[i]); + ret = nouveau_gpuobj_new(dev, chan, NV50_VM_BLOCK / 0x10000 * 8, + 0, 0, &chan->vm_vram_pt[i]); if (ret) { NV_ERROR(dev, "Error creating VRAM page tables: %d\n", ret); dev_priv->vm_vram_pt_nr = i; return ret; } - dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj; + /*XXX: double-check this is ok */ + dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; v += 4) BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); BAR0_WI32(chan->vm_pd, 0x10 + (i*8), - chan->vm_vram_pt[i]->instance | 0x61); + chan->vm_vram_pt[i]->vinst | 0x61); BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); } /* DMA object for PRAMIN BAR */ - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, - &priv->pramin_bar); + ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); if (ret) return ret; - BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); + BAR0_WI32(priv->pramin_bar, 0x00, 0x7fc00000); + BAR0_WI32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); + BAR0_WI32(priv->pramin_bar, 0x08, 0x00000000); + BAR0_WI32(priv->pramin_bar, 0x0c, 0x00000000); + BAR0_WI32(priv->pramin_bar, 0x10, 0x00000000); + BAR0_WI32(priv->pramin_bar, 0x14, 0x00000000); /* DMA object for FB BAR */ - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, - &priv->fb_bar); + ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); if (ret) return ret; - 
BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + - pci_resource_len(dev->pdev, 1) - 1); - BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000); + BAR0_WI32(priv->fb_bar, 0x00, 0x7fc00000); + BAR0_WI32(priv->fb_bar, 0x04, 0x40000000 + + pci_resource_len(dev->pdev, 1) - 1); + BAR0_WI32(priv->fb_bar, 0x08, 0x40000000); + BAR0_WI32(priv->fb_bar, 0x0c, 0x00000000); + BAR0_WI32(priv->fb_bar, 0x10, 0x00000000); + BAR0_WI32(priv->fb_bar, 0x14, 0x00000000); /* Poke the relevant regs, and pray it works :) */ - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) @@ -301,21 +291,19 @@ nv50_instmem_takedown(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); - nouveau_gpuobj_ref_del(dev, &priv->fb_bar); - nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); - nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); + nouveau_gpuobj_ref(NULL, &priv->fb_bar); + nouveau_gpuobj_ref(NULL, &priv->pramin_bar); + nouveau_gpuobj_ref(NULL, &priv->pramin_pt); /* Destroy dummy channel */ if (chan) { - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); - dev_priv->vm_vram_pt[i] = NULL; - } + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) + nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); dev_priv->vm_vram_pt_nr = 0; - nouveau_gpuobj_del(dev, &chan->vm_pd); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + nouveau_gpuobj_ref(NULL, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramin); drm_mm_takedown(&chan->ramin_heap); dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; @@ -331,7 +319,7 @@ nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ramin = chan->ramin; int i; ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); @@ -349,7 +337,7 @@ nv50_instmem_resume(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ramin = chan->ramin; int i; nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); @@ -359,13 +347,13 @@ nv50_instmem_resume(struct drm_device *dev) ramin->im_backing_suspend = NULL; /* Poke the relevant regs, and pray it works :) */ - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 
12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) @@ -424,7 +412,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; + struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; uint32_t pte, pte_end; uint64_t vram; @@ -477,8 +465,8 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; while (pte < pte_end) { - nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 0, 0x00000000); - nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 4, 0x00000000); + nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); + nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); pte += 2; } dev_priv->engine.instmem.flush(dev); From 43efc9ce25c6956133c07394a6fa44ef2c9268a4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:32 +1000 Subject: [PATCH 237/476] drm/nouveau: simplify fake gpu objects Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 +-- drivers/gpu/drm/nouveau/nouveau_object.c | 70 ++++++++---------------- drivers/gpu/drm/nouveau/nv04_instmem.c | 21 +------ drivers/gpu/drm/nouveau/nv50_instmem.c | 19 +++---- drivers/gpu/drm/nouveau/nvc0_instmem.c | 7 +-- 5 files changed, 39 insertions(+), 86 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bf89d0297e21..f8476f14c15c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -135,20 +135,19 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) -#define NVOBJ_FLAG_FAKE (1 << 3) struct nouveau_gpuobj { struct drm_device *dev; struct list_head list; struct drm_mm_node *im_pramin; struct nouveau_bo *im_backing; - uint32_t im_backing_start; uint32_t *im_backing_suspend; int im_bound; uint32_t flags; int refcount; + u32 size; u32 pinst; u32 cinst; u64 vinst; @@ -753,9 +752,8 @@ extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, struct nouveau_gpuobj **); extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_new_fake(struct drm_device *, - uint32_t p_offset, uint32_t b_offset, - uint32_t size, uint32_t flags, +extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, + u32 size, u32 flags, struct nouveau_gpuobj **); extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index d55c50f1a2d3..4bcea11f54e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -91,6 +91,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, gpuobj->dev = dev; 
gpuobj->flags = flags; gpuobj->refcount = 1; + gpuobj->size = size; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); @@ -133,25 +134,23 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, /* calculate the various different addresses for the object */ if (chan) { - gpuobj->pinst = gpuobj->im_pramin->start + - chan->ramin->im_pramin->start; + gpuobj->pinst = gpuobj->im_pramin->start + chan->ramin->pinst; if (dev_priv->card_type < NV_50) { gpuobj->cinst = gpuobj->pinst; } else { gpuobj->cinst = gpuobj->im_pramin->start; gpuobj->vinst = gpuobj->im_pramin->start + - chan->ramin->im_backing_start; + chan->ramin->vinst; } } else { gpuobj->pinst = gpuobj->im_pramin->start; gpuobj->cinst = 0xdeadbeef; - gpuobj->vinst = gpuobj->im_backing_start; } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { int i; - for (i = 0; i < gpuobj->im_pramin->size; i += 4) + for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } @@ -237,7 +236,7 @@ nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) NV_DEBUG(dev, "gpuobj %p\n", gpuobj); if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { - for (i = 0; i < gpuobj->im_pramin->size; i += 4) + for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } @@ -245,15 +244,11 @@ nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) + if (gpuobj->im_backing) engine->instmem.clear(dev, gpuobj); - if (gpuobj->im_pramin) { - if (gpuobj->flags & NVOBJ_FLAG_FAKE) - kfree(gpuobj->im_pramin); - else - drm_mm_put_block(gpuobj->im_pramin); - } + if (gpuobj->im_pramin) + drm_mm_put_block(gpuobj->im_pramin); list_del(&gpuobj->list); @@ -274,56 +269,37 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) } int -nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, - uint32_t b_offset, uint32_t size, - uint32_t flags, struct nouveau_gpuobj **pgpuobj) +nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, + u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = NULL; int i; NV_DEBUG(dev, - "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", - p_offset, b_offset, size, flags); + "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n", + pinst, vinst, size, flags); gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); gpuobj->dev = dev; - gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + gpuobj->flags = flags; gpuobj->refcount = 1; - - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - - if (p_offset != ~0) { - gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), - GFP_KERNEL); - if (!gpuobj->im_pramin) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - gpuobj->im_pramin->start = p_offset; - gpuobj->im_pramin->size = size; - } - - if (b_offset != ~0) { - gpuobj->im_backing = (struct nouveau_bo *)-1; - gpuobj->im_backing_start = b_offset; - } - - gpuobj->pinst = gpuobj->im_pramin->start; + gpuobj->size = size; + gpuobj->pinst = pinst; gpuobj->cinst = 0xdeadbeef; - gpuobj->vinst = gpuobj->im_backing_start; + gpuobj->vinst = vinst; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - for (i = 0; i < gpuobj->im_pramin->size; i += 4) + for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); dev_priv->engine.instmem.flush(dev); } - if (pgpuobj) - *pgpuobj = gpuobj; + 
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + *pgpuobj = gpuobj; return 0; } @@ -830,16 +806,16 @@ nouveau_gpuobj_suspend(struct drm_device *dev) } list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) + if (!gpuobj->im_backing) continue; - gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); + gpuobj->im_backing_suspend = vmalloc(gpuobj->size); if (!gpuobj->im_backing_suspend) { nouveau_gpuobj_resume(dev); return -ENOMEM; } - for (i = 0; i < gpuobj->im_pramin->size; i += 4) + for (i = 0; i < gpuobj->size; i += 4) gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); } @@ -885,7 +861,7 @@ nouveau_gpuobj_resume(struct drm_device *dev) if (!gpuobj->im_backing_suspend) continue; - for (i = 0; i < gpuobj->im_pramin->size; i += 4) + for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); dev_priv->engine.instmem.flush(dev); } diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 4408232d33f1..619109f77b79 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -143,43 +143,26 @@ nv04_instmem_takedown(struct drm_device *dev) } int -nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) +nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + uint32_t *sz) { - if (gpuobj->im_backing) - return -EINVAL; - return 0; } void nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - gpuobj->im_backing = NULL; - } } int nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - if (!gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - - gpuobj->im_bound = 1; return 0; } int nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - if (gpuobj->im_bound == 0) - return -EINVAL; - - gpuobj->im_bound = 0; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 0af0baf4f1a9..c18d1d8bb4cf 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -205,8 +205,7 @@ nv50_instmem_init(struct drm_device *dev) /*XXX: double-check this is ok */ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; - for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; - v += 4) + for (v = 0; v < dev_priv->vm_vram_pt[i]->size; v += 4) BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); BAR0_WI32(chan->vm_pd, 0x10 + (i*8), @@ -322,11 +321,11 @@ nv50_instmem_suspend(struct drm_device *dev) struct nouveau_gpuobj *ramin = chan->ramin; int i; - ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); + ramin->im_backing_suspend = vmalloc(ramin->size); if (!ramin->im_backing_suspend) return -ENOMEM; - for (i = 0; i < ramin->im_pramin->size; i += 4) + for (i = 0; i < ramin->size; i += 4) ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); return 0; } @@ -340,8 +339,8 @@ nv50_instmem_resume(struct drm_device *dev) struct nouveau_gpuobj *ramin = chan->ramin; int i; - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); - for (i = 0; i < ramin->im_pramin->size; i += 4) + nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->vinst >> 16)); + for (i = 0; i < ramin->size; i += 4) BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]); 
vfree(ramin->im_backing_suspend); ramin->im_backing_suspend = NULL; @@ -387,9 +386,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; - gpuobj->im_backing_start <<= PAGE_SHIFT; - + gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT; return 0; } @@ -424,11 +421,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - vram = gpuobj->im_backing_start; + vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); vram |= 1; if (dev_priv->vram_sys_base) { diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 6b451f864783..0ffdcf6c7f5d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; - gpuobj->im_backing_start <<= PAGE_SHIFT; + gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT; return 0; } @@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = gpuobj->im_pramin->start >> 12; pte_end = (gpuobj->im_pramin->size >> 12) + pte; - vram = gpuobj->im_backing_start; + vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); while (pte < pte_end) { nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1); From 5125bfd88608012d58652ac7ea6a03a78773200f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:33 +1000 Subject: [PATCH 238/476] drm/nv50: allow gpuobjs that aren't mapped into aperture Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 6 +- drivers/gpu/drm/nouveau/nouveau_object.c | 106 +++++++++++++++++------ drivers/gpu/drm/nouveau/nv04_instmem.c | 1 + drivers/gpu/drm/nouveau/nv40_graph.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 2 + 5 files changed, 88 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index f8476f14c15c..ec1be3fc80fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -513,8 +513,12 @@ struct drm_nouveau_private { int flags; void __iomem *mmio; + void __iomem *ramin; - uint32_t ramin_size; + u32 ramin_size; + u32 ramin_base; + bool ramin_available; + spinlock_t ramin_lock; struct nouveau_bo *vga_ram; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 4bcea11f54e6..df445fcb8321 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -75,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; struct nouveau_gpuobj *gpuobj; - struct drm_mm *pramin = NULL; + struct drm_mm_node *ramin = NULL; int ret; 
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", @@ -95,36 +95,42 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - /* Choose between global instmem heap, and per-channel private - * instmem heap. On ramin_heap; + + ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); + if (ramin) + ramin = drm_mm_get_block(ramin, size, align); + + if (!ramin) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } } else { NV_DEBUG(dev, "global heap\n"); - pramin = &dev_priv->ramin_heap; + /* allocate backing pages, sets vinst */ ret = engine->instmem.populate(dev, gpuobj, &size); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } + + /* try and get aperture space */ + ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); + if (ramin) + ramin = drm_mm_get_block(ramin, size, align); + + /* on nv50 it's ok to fail, we have a fallback path */ + if (!ramin && dev_priv->card_type < NV_50) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } } - /* Allocate a chunk of the PRAMIN aperture */ - gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); - if (gpuobj->im_pramin) - gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); - - if (!gpuobj->im_pramin) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - - if (!chan) { + /* if we got a chunk of the aperture, map pages into it */ + gpuobj->im_pramin = ramin; + if (!chan && gpuobj->im_pramin) { ret = engine->instmem.bind(dev, gpuobj); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); @@ -134,7 +140,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, /* calculate the various different addresses for the object */ if (chan) { - gpuobj->pinst = gpuobj->im_pramin->start + chan->ramin->pinst; + gpuobj->pinst = chan->ramin->pinst; + if (gpuobj->pinst != ~0) + gpuobj->pinst += gpuobj->im_pramin->start; + if (dev_priv->card_type < NV_50) { gpuobj->cinst = gpuobj->pinst; } else { @@ -143,7 +152,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, chan->ramin->vinst; } } else { - gpuobj->pinst = gpuobj->im_pramin->start; + if (gpuobj->im_pramin) + gpuobj->pinst = gpuobj->im_pramin->start; + else + gpuobj->pinst = ~0; gpuobj->cinst = 0xdeadbeef; } @@ -168,6 +180,8 @@ nouveau_gpuobj_early_init(struct drm_device *dev) NV_DEBUG(dev, "\n"); INIT_LIST_HEAD(&dev_priv->gpuobj_list); + spin_lock_init(&dev_priv->ramin_lock); + dev_priv->ramin_base = ~0; return 0; } @@ -650,12 +664,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, * locations determined during init. */ if (dev_priv->card_type >= NV_50) { - uint32_t vm_offset, pde; + u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; + u64 vm_vinst = chan->ramin->vinst + pgd_offs; + u32 vm_pinst = chan->ramin->pinst; + u32 pde; - vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; - vm_offset += chan->ramin->im_pramin->start; + if (vm_pinst != ~0) + vm_pinst += pgd_offs; - ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, + ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000, 0, &chan->vm_pd); if (ret) return ret; @@ -941,11 +958,46 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, u32 nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) { - return nv_ri32(gpuobj->dev, gpuobj->pinst + offset); + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + u32 val; + + spin_lock(&dev_priv->ramin_lock); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); + spin_unlock(&dev_priv->ramin_lock); + return val; + } + + return nv_ri32(dev, gpuobj->pinst + offset); } void nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) { - nv_wi32(gpuobj->dev, gpuobj->pinst + offset, val); + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + + spin_lock(&dev_priv->ramin_lock); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); + spin_unlock(&dev_priv->ramin_lock); + return; + } + + nv_wi32(dev, gpuobj->pinst + offset, val); } diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 619109f77b79..3aba7674560c 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -134,6 +134,7 @@ int nv04_instmem_init(struct drm_device *dev) return ret; } + dev_priv->ramin_available = true; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 912940e2457d..7ee1b91569b8 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -72,7 +72,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) ctx.data = chan->ramin_grctx; nv40_grctx_init(&ctx); - nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->im_pramin->start); + nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index c18d1d8bb4cf..5c617f807b23 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -249,6 +249,8 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); + dev_priv->ramin_available = true; + /* Assume that praying isn't enough, check that we can re-read the * entire fake channel back from the PRAMIN BAR */ for (i = 0; i < c_size; i += 4) { From 6c3d7ef25e3b4a0ea511b1e9d4a0a212750874a6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 12 Aug 2010 12:37:28 +1000 Subject: [PATCH 239/476] drm/nv50: calculate vram reordering block size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will be used at a later point when we plug in an alternative VRAM memory manager for GeForce 8+ boards. Based on pscnv code to do the same. 
Signed-off-by: Ben Skeggs Signed-off-by: Marcin Kościelnicki Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_mem.c | 67 ++++++++++++++++++++++++++- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ec1be3fc80fc..150fdeea11a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -579,6 +579,7 @@ struct drm_nouveau_private { /* VRAM/fb configuration */ uint64_t vram_size; uint64_t vram_sys_base; + u32 vram_rblock_size; uint64_t fb_phys; uint64_t fb_available_size; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index f34c532bcac3..6eeaeac56293 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -308,7 +308,61 @@ nouveau_mem_detect_nforce(struct drm_device *dev) return 0; } -/* returns the amount of FB ram in bytes */ +static void +nv50_vram_preinit(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru; + + r0 = nv_rd32(dev, 0x100200); + r4 = nv_rd32(dev, 0x100204); + rt = nv_rd32(dev, 0x100250); + ru = nv_rd32(dev, 0x001540); + NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = ((r4 & 0x01000000) ? 8 : 4); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != dev_priv->vram_size) { + NV_WARN(dev, "memory controller reports %dMiB VRAM\n", + (u32)(dev_priv->vram_size >> 20)); + NV_WARN(dev, "we calculated %dMiB VRAM\n", + (u32)(predicted >> 20)); + } + + dev_priv->vram_rblock_size = rowsize >> 12; + if (rt & 1) + dev_priv->vram_rblock_size *= 3; + + NV_DEBUG(dev, "rblock %lld bytes\n", + (u64)dev_priv->vram_rblock_size << 12); +} + +static void +nvaa_vram_preinit(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* To our knowledge, there's no large scale reordering of pages + * that occurs on IGP chipsets. 
+ */ + dev_priv->vram_rblock_size = 1; +} + int nouveau_mem_detect(struct drm_device *dev) { @@ -328,9 +382,18 @@ nouveau_mem_detect(struct drm_device *dev) dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; dev_priv->vram_size &= 0xffffffff00ll; - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { + + switch (dev_priv->chipset) { + case 0xaa: + case 0xac: + case 0xaf: dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); dev_priv->vram_sys_base <<= 12; + nvaa_vram_preinit(dev); + break; + default: + nv50_vram_preinit(dev); + break; } } else { dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; From fbd2895e45aebdb3d3ea73a3a796cf3bb9c912da Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:34 +1000 Subject: [PATCH 240/476] drm/nouveau: rework init ordering so nv50_instmem.c can be less bad Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 5 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 6 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 75 +++-- drivers/gpu/drm/nouveau/nouveau_object.c | 27 +- drivers/gpu/drm/nouveau/nouveau_state.c | 51 ++-- drivers/gpu/drm/nouveau/nv04_instmem.c | 51 +--- drivers/gpu/drm/nouveau/nv50_instmem.c | 335 ++++++++++------------- 7 files changed, 245 insertions(+), 305 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 841c63f28867..8e4a9bce4f3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -403,7 +403,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - man->gpu_offset = dev_priv->vm_vram_base; + if (dev_priv->card_type == NV_50) + man->gpu_offset = 0x40000000; + else + man->gpu_offset = 0; break; case TTM_PL_TT: switch (dev_priv->gart_info.type) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 150fdeea11a1..3ba7a649fe51 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -700,8 +700,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); /* nouveau_mem.c */ -extern int nouveau_mem_detect(struct drm_device *dev); -extern int nouveau_mem_init(struct drm_device *); +extern int nouveau_mem_vram_init(struct drm_device *); +extern void nouveau_mem_vram_fini(struct drm_device *); +extern int nouveau_mem_gart_init(struct drm_device *); +extern void nouveau_mem_gart_fini(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 6eeaeac56293..fb15a1b0dda9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -221,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) * Cleanup everything */ void -nouveau_mem_close(struct drm_device *dev) +nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -232,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev) nouveau_ttm_global_release(dev_priv); + if (dev_priv->fb_mtrr >= 0) { + drm_mtrr_del(dev_priv->fb_mtrr, + pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), 
DRM_MTRR_WC); + dev_priv->fb_mtrr = -1; + } +} + +void +nouveau_mem_gart_fini(struct drm_device *dev) +{ + nouveau_sgdma_takedown(dev); + if (drm_core_has_AGP(dev) && dev->agp) { struct drm_agp_mem *entry, *tempe; @@ -251,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev) dev->agp->acquired = 0; dev->agp->enabled = 0; } - - if (dev_priv->fb_mtrr) { - drm_mtrr_del(dev_priv->fb_mtrr, - pci_resource_start(dev->pdev, 1), - pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); - dev_priv->fb_mtrr = -1; - } } static uint32_t @@ -363,7 +369,7 @@ nvaa_vram_preinit(struct drm_device *dev) dev_priv->vram_rblock_size = 1; } -int +static int nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -500,24 +506,27 @@ nouveau_mem_init_agp(struct drm_device *dev) } int -nouveau_mem_init(struct drm_device *dev) +nouveau_mem_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; - int ret, dma_bits = 32; - - dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); - dev_priv->gart_info.type = NOUVEAU_GART_NONE; + int ret, dma_bits; if (dev_priv->card_type >= NV_50 && pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) dma_bits = 40; + else + dma_bits = 32; ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); - if (ret) { - NV_ERROR(dev, "Error setting DMA mask: %d\n", ret); + if (ret) return ret; - } + + ret = nouveau_mem_detect(dev); + if (ret) + return ret; + + dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); ret = nouveau_ttm_global_init(dev_priv); if (ret) @@ -541,7 +550,16 @@ nouveau_mem_init(struct drm_device *dev) pci_resource_len(dev->pdev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - /* remove reserved space at end of vram from available amount */ + /* reserve space at end of VRAM for PRAMIN */ + if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || + dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) + dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); + else + if (dev_priv->card_type >= NV_40) + dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); + else + dev_priv->ramin_rsvd_vram = (512 * 1024); + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; @@ -562,7 +580,21 @@ nouveau_mem_init(struct drm_device *dev) nouveau_bo_ref(NULL, &dev_priv->vga_ram); } - /* GART */ + dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), + DRM_MTRR_WC); + return 0; +} + +int +nouveau_mem_gart_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + int ret; + + dev_priv->gart_info.type = NOUVEAU_GART_NONE; + #if !defined(__powerpc__) && !defined(__ia64__) if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) { ret = nouveau_mem_init_agp(dev); @@ -590,11 +622,6 @@ nouveau_mem_init(struct drm_device *dev) return ret; } - dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), - pci_resource_len(dev->pdev, 1), - DRM_MTRR_WC); - return 0; } - diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index df445fcb8321..b68922f2fe54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -130,7 +130,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, /* if we got a chunk of the aperture, map pages into it */ gpuobj->im_pramin = ramin; - if (!chan && 
gpuobj->im_pramin) { + if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { ret = engine->instmem.bind(dev, gpuobj); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); @@ -173,7 +173,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, } int -nouveau_gpuobj_early_init(struct drm_device *dev) +nouveau_gpuobj_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -186,29 +186,6 @@ nouveau_gpuobj_early_init(struct drm_device *dev) return 0; } -int -nouveau_gpuobj_init(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramht = NULL; - int ret; - - NV_DEBUG(dev, "\n"); - - if (dev_priv->card_type >= NV_50) - return 0; - - ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, - dev_priv->ramht_size, - NVOBJ_FLAG_ZERO_ALLOC, &ramht); - if (ret) - return ret; - - ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); - nouveau_gpuobj_ref(NULL, &ramht); - return ret; -} - void nouveau_gpuobj_takedown(struct drm_device *dev) { diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index fec29522298d..19eb06dca899 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -532,35 +532,26 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_display_early; - ret = nouveau_mem_detect(dev); + ret = nouveau_mem_vram_init(dev); if (ret) goto out_bios; - ret = nouveau_gpuobj_early_init(dev); - if (ret) - goto out_bios; - - /* Initialise instance memory, must happen before mem_init so we - * know exactly how much VRAM we're able to use for "normal" - * purposes. - */ - ret = engine->instmem.init(dev); - if (ret) - goto out_gpuobj_early; - - /* Setup the memory manager */ - ret = nouveau_mem_init(dev); - if (ret) - goto out_instmem; - ret = nouveau_gpuobj_init(dev); if (ret) - goto out_mem; + goto out_vram; + + ret = engine->instmem.init(dev); + if (ret) + goto out_gpuobj; + + ret = nouveau_mem_gart_init(dev); + if (ret) + goto out_instmem; /* PMC */ ret = engine->mc.init(dev); if (ret) - goto out_gpuobj; + goto out_gart; /* PGPIO */ ret = engine->gpio.init(dev); @@ -640,15 +631,14 @@ out_gpio: engine->gpio.takedown(dev); out_mc: engine->mc.takedown(dev); -out_gpuobj: - nouveau_gpuobj_takedown(dev); -out_mem: - nouveau_sgdma_takedown(dev); - nouveau_mem_close(dev); +out_gart: + nouveau_mem_gart_fini(dev); out_instmem: engine->instmem.takedown(dev); -out_gpuobj_early: - nouveau_gpuobj_late_takedown(dev); +out_gpuobj: + nouveau_gpuobj_takedown(dev); +out_vram: + nouveau_mem_vram_fini(dev); out_bios: nouveau_bios_takedown(dev); out_display_early: @@ -684,15 +674,14 @@ static void nouveau_card_takedown(struct drm_device *dev) ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); mutex_unlock(&dev->struct_mutex); - nouveau_sgdma_takedown(dev); + nouveau_mem_gart_fini(dev); - nouveau_gpuobj_takedown(dev); - nouveau_mem_close(dev); engine->instmem.takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_vram_fini(dev); drm_irq_uninstall(dev); - nouveau_gpuobj_late_takedown(dev); nouveau_bios_takedown(dev); vga_client_register(dev->pdev, NULL, NULL, NULL); diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 3aba7674560c..15cd468f4c29 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -1,6 +1,7 @@ #include "drmP.h" #include "drm.h" #include 
"nouveau_drv.h" +#include "nouveau_ramht.h" /* returns the size of fifo context */ static int @@ -17,42 +18,6 @@ nouveau_fifo_ctx_size(struct drm_device *dev) return 32; } -static void -nv04_instmem_determine_amount(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i; - - /* Figure out how much instance memory we need */ - if (dev_priv->card_type >= NV_40) { - /* We'll want more instance memory than this on some NV4x cards. - * There's a 16MB aperture to play with that maps onto the end - * of vram. For now, only reserve a small piece until we know - * more about what each chipset requires. - */ - switch (dev_priv->chipset) { - case 0x40: - case 0x47: - case 0x49: - case 0x4b: - dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); - break; - default: - dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); - break; - } - } else { - /*XXX: what *are* the limits on ramin_rsvd_vram = (512 * 1024); - } - NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); - - /* Clear all of it, except the BIOS image that's in the first 64KiB */ - for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) - nv_wi32(dev, i, 0x00000000); -} - static void nv04_instmem_configure_fixed_tables(struct drm_device *dev) { @@ -103,12 +68,24 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev) int nv04_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; uint32_t offset; int ret; - nv04_instmem_determine_amount(dev); nv04_instmem_configure_fixed_tables(dev); + /* Setup shared RAMHT */ + ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, + dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); + if (ret) + return ret; + + ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) + return ret; + /* Create a heap to manage RAMIN allocations, we don't allocate * the space that was reserved for RAMHT/FC/RO. */ diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 5c617f807b23..d932594449c1 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -37,27 +37,82 @@ struct nv50_instmem_priv { struct nouveau_gpuobj *fb_bar; }; -#define NV50_INSTMEM_PAGE_SHIFT 12 -#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) -#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) +static void +nv50_channel_del(struct nouveau_channel **pchan) +{ + struct nouveau_channel *chan; -/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN - */ -#define BAR0_WI32(g, o, v) do { \ - u32 offset = (g)->vinst + (o); \ - nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ -} while (0) + chan = *pchan; + *pchan = NULL; + if (!chan) + return; + + nouveau_gpuobj_ref(NULL, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + if (chan->ramin_heap.free_stack.next) + drm_mm_takedown(&chan->ramin_heap); + nouveau_gpuobj_ref(NULL, &chan->ramin); + kfree(chan); +} + +static int +nv50_channel_new(struct drm_device *dev, u32 size, + struct nouveau_channel **pchan) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; + u32 fc = (dev_priv->chipset == 0x50) ? 
0x0000 : 0x4200; + struct nouveau_channel *chan; + int ret; + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = dev; + + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : + chan->ramin->pinst + pgd, + chan->ramin->vinst + pgd, + 0x4000, NVOBJ_FLAG_ZERO_ALLOC, + &chan->vm_pd); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : + chan->ramin->pinst + fc, + chan->ramin->vinst + fc, 0x100, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + *pchan = chan; + return 0; +} int nv50_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; - uint32_t save_nv001700; - uint64_t v; struct nv50_instmem_priv *priv; + struct nouveau_channel *chan; int ret, i; + u32 tmp; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -68,27 +123,61 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); - /* Reserve the last MiB of VRAM, we should probably try to avoid - * setting up the below tables over the top of the VBIOS image at - * some point. - */ - dev_priv->ramin_rsvd_vram = 1 << 20; - c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; - c_size = 128 << 10; - c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; - c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; - c_base = c_vmpd + 0x4000; - pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size); + /* Global PRAMIN heap */ + ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); + if (ret) { + NV_ERROR(dev, "Failed to init RAMIN heap\n"); + return -ENOMEM; + } - NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset); - NV_DEBUG(dev, " VBIOS image: 0x%08x\n", - (nv_rd32(dev, 0x619f04) & ~0xff) << 8); - NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20); - NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10); + /* we need a channel to plug into the hw to control the BARs */ + ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); + if (ret) + return ret; + chan = dev_priv->fifos[127] = dev_priv->fifos[0]; - /* Determine VM layout, we need to do this first to make sure - * we allocate enough memory for all the page tables. 
- */ + /* allocate page table for PRAMIN BAR */ + ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, + &priv->pramin_pt); + if (ret) + return ret; + + nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); + nv_wo32(chan->vm_pd, 0x0004, 0); + + /* DMA object for PRAMIN BAR */ + ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); + if (ret) + return ret; + nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); + nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); + nv_wo32(priv->pramin_bar, 0x08, 0x00000000); + nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); + nv_wo32(priv->pramin_bar, 0x10, 0x00000000); + nv_wo32(priv->pramin_bar, 0x14, 0x00000000); + + /* map channel into PRAMIN, gpuobj didn't do it for us */ + ret = nv50_instmem_bind(dev, chan->ramin); + if (ret) + return ret; + + /* poke regs... */ + nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); + nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); + nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); + + tmp = nv_ri32(dev, 0); + nv_wi32(dev, 0, ~tmp); + if (nv_ri32(dev, 0) != ~tmp) { + NV_ERROR(dev, "PRAMIN readback failed\n"); + return -EIO; + } + nv_wi32(dev, 0, tmp); + + dev_priv->ramin_available = true; + + /* Determine VM layout */ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); dev_priv->vm_gart_size = NV50_VM_BLOCK; @@ -108,166 +197,39 @@ nv50_instmem_init(struct drm_device *dev) dev_priv->vm_vram_base, dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); - c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8); - - /* Map BAR0 PRAMIN aperture over the memory we want to use */ - save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN); - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); - - /* Create a fake channel, and use it as our "dummy" channels 0/127. - * The main reason for creating a channel is so we can use the gpuobj - * code. However, it's probably worth noting that NVIDIA also setup - * their channels 0/127 with the same values they configure here. - * So, there may be some other reason for doing this. - * - * Have to create the entire channel manually, as the real channel - * creation code assumes we have PRAMIN access, and we don't until - * we're done here. - */ - chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) - return -ENOMEM; - chan->id = 0; - chan->dev = dev; - chan->file_priv = (struct drm_file *)-2; - dev_priv->fifos[0] = dev_priv->fifos[127] = chan; - - INIT_LIST_HEAD(&chan->ramht_refs); - - /* Channel's PRAMIN object + heap */ - ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, &chan->ramin); - if (ret) - return ret; - - if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) - return -ENOMEM; - - /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ - ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, - 0x4000, 0, &chan->ramfc); - if (ret) - return ret; - - for (i = 0; i < c_vmpd; i += 4) - BAR0_WI32(chan->ramin, i, 0); - - /* VM page directory */ - ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, - 0x4000, 0, &chan->vm_pd); - if (ret) - return ret; - for (i = 0; i < 0x4000; i += 8) { - BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); - BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); - } - - /* PRAMIN page table, cheat and map into VM at 0x0000000000. 
- * We map the entire fake channel into the start of the PRAMIN BAR - */ - ret = nouveau_gpuobj_new(dev, chan, pt_size, 0x1000, 0, - &priv->pramin_pt); - if (ret) - return ret; - - v = c_offset | 1; - if (dev_priv->vram_sys_base) { - v += dev_priv->vram_sys_base; - v |= 0x30; - } - - i = 0; - while (v < dev_priv->vram_sys_base + c_offset + c_size) { - BAR0_WI32(priv->pramin_pt, i + 0, lower_32_bits(v)); - BAR0_WI32(priv->pramin_pt, i + 4, upper_32_bits(v)); - v += 0x1000; - i += 8; - } - - while (i < pt_size) { - BAR0_WI32(priv->pramin_pt, i + 0, 0x00000000); - BAR0_WI32(priv->pramin_pt, i + 4, 0x00000000); - i += 8; - } - - BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->vinst | 0x63); - BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); - /* VRAM page table(s), mapped into VM at +1GiB */ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_new(dev, chan, NV50_VM_BLOCK / 0x10000 * 8, - 0, 0, &chan->vm_vram_pt[i]); + ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, + 0, NVOBJ_FLAG_ZERO_ALLOC, + &chan->vm_vram_pt[i]); if (ret) { - NV_ERROR(dev, "Error creating VRAM page tables: %d\n", - ret); + NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); dev_priv->vm_vram_pt_nr = i; return ret; } - /*XXX: double-check this is ok */ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; - for (v = 0; v < dev_priv->vm_vram_pt[i]->size; v += 4) - BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); - - BAR0_WI32(chan->vm_pd, 0x10 + (i*8), - chan->vm_vram_pt[i]->vinst | 0x61); - BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); + nv_wo32(chan->vm_pd, 0x10 + (i*8), + chan->vm_vram_pt[i]->vinst | 0x61); + nv_wo32(chan->vm_pd, 0x14 + (i*8), 0); } - /* DMA object for PRAMIN BAR */ - ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); - if (ret) - return ret; - BAR0_WI32(priv->pramin_bar, 0x00, 0x7fc00000); - BAR0_WI32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); - BAR0_WI32(priv->pramin_bar, 0x08, 0x00000000); - BAR0_WI32(priv->pramin_bar, 0x0c, 0x00000000); - BAR0_WI32(priv->pramin_bar, 0x10, 0x00000000); - BAR0_WI32(priv->pramin_bar, 0x14, 0x00000000); - /* DMA object for FB BAR */ ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); if (ret) return ret; - BAR0_WI32(priv->fb_bar, 0x00, 0x7fc00000); - BAR0_WI32(priv->fb_bar, 0x04, 0x40000000 + - pci_resource_len(dev->pdev, 1) - 1); - BAR0_WI32(priv->fb_bar, 0x08, 0x40000000); - BAR0_WI32(priv->fb_bar, 0x0c, 0x00000000); - BAR0_WI32(priv->fb_bar, 0x10, 0x00000000); - BAR0_WI32(priv->fb_bar, 0x14, 0x00000000); - - /* Poke the relevant regs, and pray it works :) */ - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); - nv_wr32(dev, NV50_PUNK_UNK1710, 0); - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | - NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | - NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | - NV50_PUNK_BAR3_CTXDMA_VALID); + nv_wo32(priv->fb_bar, 0x00, 0x7fc00000); + nv_wo32(priv->fb_bar, 0x04, 0x40000000 + + pci_resource_len(dev->pdev, 1) - 1); + nv_wo32(priv->fb_bar, 0x08, 0x40000000); + nv_wo32(priv->fb_bar, 0x0c, 0x00000000); + nv_wo32(priv->fb_bar, 0x10, 0x00000000); + nv_wo32(priv->fb_bar, 0x14, 0x00000000); + nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); - dev_priv->ramin_available = true; - - /* Assume that praying isn't enough, check that we can re-read the - * entire fake channel back from the PRAMIN 
BAR */ - for (i = 0; i < c_size; i += 4) { - if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { - NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", - i); - return -EINVAL; - } - } - - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); - - /* Global PRAMIN heap */ - if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) { - NV_ERROR(dev, "Failed to init RAMIN heap\n"); - } - /*XXX: incorrect, but needed to make hash func "work" */ dev_priv->ramht_offset = 0x10000; dev_priv->ramht_bits = 9; @@ -288,6 +250,8 @@ nv50_instmem_takedown(struct drm_device *dev) if (!priv) return; + dev_priv->ramin_available = false; + /* Restore state from before init */ for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); @@ -302,13 +266,8 @@ nv50_instmem_takedown(struct drm_device *dev) nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); dev_priv->vm_vram_pt_nr = 0; - nouveau_gpuobj_ref(NULL, &chan->vm_pd); - nouveau_gpuobj_ref(NULL, &chan->ramfc); - nouveau_gpuobj_ref(NULL, &chan->ramin); - drm_mm_takedown(&chan->ramin_heap); - - dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; - kfree(chan); + nv50_channel_del(&dev_priv->fifos[0]); + dev_priv->fifos[127] = NULL; } dev_priv->engine.instmem.priv = NULL; @@ -341,9 +300,11 @@ nv50_instmem_resume(struct drm_device *dev) struct nouveau_gpuobj *ramin = chan->ramin; int i; - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->vinst >> 16)); + dev_priv->ramin_available = false; + dev_priv->ramin_base = ~0; for (i = 0; i < ramin->size; i += 4) - BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]); + nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); + dev_priv->ramin_available = true; vfree(ramin->im_backing_suspend); ramin->im_backing_suspend = NULL; @@ -370,7 +331,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, if (gpuobj->im_backing) return -EINVAL; - *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE); + *sz = ALIGN(*sz, 4096); if (*sz == 0) return -EINVAL; @@ -438,7 +399,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) while (pte < pte_end) { nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); - vram += NV50_INSTMEM_PAGE_SIZE; + vram += 0x1000; pte += 2; } dev_priv->engine.instmem.flush(dev); @@ -460,6 +421,10 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) if (gpuobj->im_bound == 0) return -EINVAL; + /* can happen during late takedown */ + if (unlikely(!dev_priv->ramin_available)) + return 0; + pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; From e05c5a317efb03854950a3fcc5c9501bfefc7d68 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:35 +1000 Subject: [PATCH 241/476] drm/nouveau: tidy ram{ht,fc,ro} a bit Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 12 ++-- drivers/gpu/drm/nouveau/nouveau_object.c | 2 - drivers/gpu/drm/nouveau/nouveau_ramht.c | 23 ++++--- drivers/gpu/drm/nouveau/nouveau_ramht.h | 1 + drivers/gpu/drm/nouveau/nv04_fifo.c | 11 +-- drivers/gpu/drm/nouveau/nv04_instmem.c | 87 +++++++++--------------- drivers/gpu/drm/nouveau/nv10_fifo.c | 13 ++-- drivers/gpu/drm/nouveau/nv40_fifo.c | 11 +-- drivers/gpu/drm/nouveau/nv50_fifo.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 4 -- drivers/gpu/drm/nouveau/nvc0_instmem.c | 4 -- 11 files changed, 69 insertions(+), 101 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h 
b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3ba7a649fe51..71e27087951b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -545,15 +545,11 @@ struct drm_nouveau_private { spinlock_t context_switch_lock; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ - struct nouveau_ramht *ramht; + struct nouveau_ramht *ramht; + struct nouveau_gpuobj *ramfc; + struct nouveau_gpuobj *ramro; + uint32_t ramin_rsvd_vram; - uint32_t ramht_offset; - uint32_t ramht_size; - uint32_t ramht_bits; - uint32_t ramfc_offset; - uint32_t ramfc_size; - uint32_t ramro_offset; - uint32_t ramro_size; struct { enum { diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index b68922f2fe54..198c2514f893 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -192,8 +192,6 @@ nouveau_gpuobj_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; NV_DEBUG(dev, "\n"); - - nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); } void diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 5f9d52f06305..ccbc8d69ea68 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -28,21 +28,23 @@ #include "nouveau_ramht.h" static uint32_t -nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) +nouveau_ramht_hash_handle(struct nouveau_channel *chan, uint32_t handle) { + struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_ramht *ramht = chan->ramht; uint32_t hash = 0; int i; - NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); + NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle); - for (i = 32; i > 0; i -= dev_priv->ramht_bits) { - hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); - handle >>= dev_priv->ramht_bits; + for (i = 32; i > 0; i -= ramht->bits) { + hash ^= (handle & ((1 << ramht->bits) - 1)); + handle >>= ramht->bits; } if (dev_priv->card_type < NV_50) - hash ^= channel << (dev_priv->ramht_bits - 4); + hash ^= chan->id << (ramht->bits - 4); hash <<= 3; NV_DEBUG(dev, "hash=0x%08x\n", hash); @@ -103,7 +105,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, } } - co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); + co = ho = nouveau_ramht_hash_handle(chan, handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { NV_DEBUG(dev, @@ -119,7 +121,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, chan->id, co, nv_ro32(ramht, co)); co += 8; - if (co >= dev_priv->ramht_size) + if (co >= ramht->size) co = 0; } while (co != ho); @@ -149,7 +151,7 @@ nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) break; } - co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); + co = ho = nouveau_ramht_hash_handle(chan, handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && (handle == nv_ro32(ramht, co))) { @@ -163,7 +165,7 @@ nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) } co += 8; - if (co >= dev_priv->ramht_size) + if (co >= ramht->size) co = 0; } while (co != ho); @@ -196,6 +198,7 @@ nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, ramht->dev = dev; ramht->refcount = 1; + ramht->bits = drm_order(gpuobj->size / 8); INIT_LIST_HEAD(&ramht->entries); nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h 
index 7076ae4c07a5..f37737a93642 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -37,6 +37,7 @@ struct nouveau_ramht { int refcount; struct nouveau_gpuobj *gpuobj; struct list_head entries; + int bits; }; extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *, diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index b7ecafb78d77..64dc0e215eeb 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" -#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) +#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) #define NV04_RAMFC__SIZE 32 #define NV04_RAMFC_DMA_PUT 0x00 #define NV04_RAMFC_DMA_GET 0x04 @@ -262,10 +263,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); - nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); } static void diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 15cd468f4c29..88316100389b 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -18,65 +18,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev) return 32; } -static void -nv04_instmem_configure_fixed_tables(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - - /* FIFO hash table (RAMHT) - * use 4k hash table at RAMIN+0x10000 - * TODO: extend the hash table - */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */ - dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */ - NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, - dev_priv->ramht_size); - - /* FIFO runout table (RAMRO) - 512k at 0x11200 */ - dev_priv->ramro_offset = 0x11200; - dev_priv->ramro_size = 512; - NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, - dev_priv->ramro_size); - - /* FIFO context table (RAMFC) - * NV40 : Not sure exactly how to position RAMFC on some cards, - * 0x30002 seems to position it at RAMIN+0x20000 on these - * cards. RAMFC is 4kb (32 fifos, 128byte entries). 
- * Others: Position RAMFC at RAMIN+0x11400 - */ - dev_priv->ramfc_size = engine->fifo.channels * - nouveau_fifo_ctx_size(dev); - switch (dev_priv->card_type) { - case NV_40: - dev_priv->ramfc_offset = 0x20000; - break; - case NV_30: - case NV_20: - case NV_10: - case NV_04: - default: - dev_priv->ramfc_offset = 0x11400; - break; - } - NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, - dev_priv->ramfc_size); -} - int nv04_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *ramht = NULL; - uint32_t offset; + u32 offset, length; int ret; - nv04_instmem_configure_fixed_tables(dev); - /* Setup shared RAMHT */ - ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, - dev_priv->ramht_size, + ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, NVOBJ_FLAG_ZERO_ALLOC, &ramht); if (ret) return ret; @@ -86,10 +36,30 @@ int nv04_instmem_init(struct drm_device *dev) if (ret) return ret; - /* Create a heap to manage RAMIN allocations, we don't allocate - * the space that was reserved for RAMHT/FC/RO. - */ - offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; + /* And RAMRO */ + ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512, + NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro); + if (ret) + return ret; + + /* And RAMFC */ + length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev); + switch (dev_priv->card_type) { + case NV_40: + offset = 0x20000; + break; + default: + offset = 0x11400; + break; + } + + ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length, + NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc); + if (ret) + return ret; + + /* Only allow space after RAMFC to be used for object allocation */ + offset += length; /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0 @@ -118,6 +88,11 @@ int nv04_instmem_init(struct drm_device *dev) void nv04_instmem_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); + nouveau_gpuobj_ref(NULL, &dev_priv->ramro); + nouveau_gpuobj_ref(NULL, &dev_priv->ramfc); } int diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index ccb07fb701ca..f1b03ad58fd5 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" -#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) +#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE)) #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) int @@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); if (dev_priv->chipset < 0x17) { - nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); } else { - nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) | + nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) | (1 << 16) /* 64 Bytes entry*/); /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ } diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 03f4dc13725b..d337b8b28cdd 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_drm.h" +#include "nouveau_ramht.h" -#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE)) +#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE)) #define NV40_RAMFC__SIZE 128 int @@ -240,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); switch (dev_priv->chipset) { case 0x47: @@ -270,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) nv_wr32(dev, 0x2230, 0); nv_wr32(dev, NV40_PFIFO_RAMFC, ((dev_priv->vram_size - 512 * 1024 + - dev_priv->ramfc_offset) >> 16) | (3 << 16)); + dev_priv->ramfc->pinst) >> 16) | (3 << 16)); break; } } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 4fc8b59cc41e..a46a961102f3 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -259,7 +259,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); - nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ | + nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | (4 << 24) /* SEARCH_FULL */ | (chan->ramht->gpuobj->cinst >> 4)); nv_wo32(ramfc, 0x44, 0x2101ffff); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index d932594449c1..2e0aaf971b2f 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -230,10 +230,6 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); - /*XXX: incorrect, but needed to make hash func "work" */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 0ffdcf6c7f5d..595540975637 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -220,10 +220,6 @@ nvc0_instmem_init(struct drm_device 
*dev) return -ENOMEM; } - /*XXX: incorrect, but needed to make hash func "work" */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } From dac790080467eb12f1049ddca1c101eb0dcc9f0c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:36 +1000 Subject: [PATCH 242/476] drm/nouveau: add spinlock around ramht modifications Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_ramht.c | 76 ++++++++++++++++++------- drivers/gpu/drm/nouveau/nouveau_ramht.h | 3 +- 2 files changed, 58 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index ccbc8d69ea68..de34b6bb059f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -27,13 +27,13 @@ #include "nouveau_drv.h" #include "nouveau_ramht.h" -static uint32_t -nouveau_ramht_hash_handle(struct nouveau_channel *chan, uint32_t handle) +static u32 +nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_ramht *ramht = chan->ramht; - uint32_t hash = 0; + u32 hash = 0; int i; NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle); @@ -53,10 +53,10 @@ nouveau_ramht_hash_handle(struct nouveau_channel *chan, uint32_t handle) static int nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, - uint32_t offset) + u32 offset) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t ctx = nv_ro32(ramht, offset + 4); + u32 ctx = nv_ro32(ramht, offset + 4); if (dev_priv->card_type < NV_40) return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); @@ -72,7 +72,8 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_ramht_entry *entry; struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; - uint32_t ctx, co, ho; + unsigned long flags; + u32 ctx, co, ho; if (nouveau_ramht_find(chan, handle)) return -EEXIST; @@ -83,7 +84,6 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, entry->channel = chan; entry->gpuobj = NULL; entry->handle = handle; - list_add(&entry->head, &chan->ramht->entries); nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); if (dev_priv->card_type < NV_40) { @@ -105,6 +105,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, } } + spin_lock_irqsave(&chan->ramht->lock, flags); + list_add(&entry->head, &chan->ramht->entries); + co = ho = nouveau_ramht_hash_handle(chan, handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { @@ -114,6 +117,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, nv_wo32(ramht, co + 0, handle); nv_wo32(ramht, co + 4, ctx); + spin_unlock_irqrestore(&chan->ramht->lock, flags); instmem->flush(dev); return 0; } @@ -127,12 +131,13 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, NV_ERROR(dev, "RAMHT space exhausted. 
ch=%d\n", chan->id); list_del(&entry->head); + spin_unlock_irqrestore(&chan->ramht->lock, flags); kfree(entry); return -ENOMEM; } -void -nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) +static void +nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -173,17 +178,35 @@ nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) chan->id, handle); } +void +nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) +{ + struct nouveau_ramht *ramht = chan->ramht; + unsigned long flags; + + spin_lock_irqsave(&ramht->lock, flags); + nouveau_ramht_remove_locked(chan, handle); + spin_unlock_irqrestore(&ramht->lock, flags); +} + struct nouveau_gpuobj * nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) { + struct nouveau_ramht *ramht = chan->ramht; struct nouveau_ramht_entry *entry; + struct nouveau_gpuobj *gpuobj = NULL; + unsigned long flags; + spin_lock_irqsave(&ramht->lock, flags); list_for_each_entry(entry, &chan->ramht->entries, head) { - if (entry->channel == chan && entry->handle == handle) - return entry->gpuobj; + if (entry->channel == chan && entry->handle == handle) { + gpuobj = entry->gpuobj; + break; + } } + spin_unlock_irqrestore(&ramht->lock, flags); - return NULL; + return gpuobj; } int @@ -197,36 +220,49 @@ nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return -ENOMEM; ramht->dev = dev; - ramht->refcount = 1; + kref_init(&ramht->refcount); ramht->bits = drm_order(gpuobj->size / 8); INIT_LIST_HEAD(&ramht->entries); + spin_lock_init(&ramht->lock); nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); *pramht = ramht; return 0; } +static void +nouveau_ramht_del(struct kref *ref) +{ + struct nouveau_ramht *ramht = + container_of(ref, struct nouveau_ramht, refcount); + + nouveau_gpuobj_ref(NULL, &ramht->gpuobj); + kfree(ramht); +} + void nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, struct nouveau_channel *chan) { struct nouveau_ramht_entry *entry, *tmp; struct nouveau_ramht *ramht; + unsigned long flags; if (ref) - ref->refcount++; + kref_get(&ref->refcount); ramht = *ptr; if (ramht) { + spin_lock_irqsave(&ramht->lock, flags); list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { - if (entry->channel == chan) - nouveau_ramht_remove(chan, entry->handle); - } + if (entry->channel != chan) + continue; - if (--ramht->refcount == 0) { - nouveau_gpuobj_ref(NULL, &ramht->gpuobj); - kfree(ramht); + nouveau_ramht_remove_locked(chan, entry->handle); } + spin_unlock_irqrestore(&ramht->lock, flags); + + kref_put(&ramht->refcount, nouveau_ramht_del); } *ptr = ref; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index f37737a93642..b79cb5e1a8f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -34,7 +34,8 @@ struct nouveau_ramht_entry { struct nouveau_ramht { struct drm_device *dev; - int refcount; + struct kref refcount; + spinlock_t lock; struct nouveau_gpuobj *gpuobj; struct list_head entries; int bits; From eb9bcbdc45369105bc004a82c7bed60655aae926 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:37 +1000 Subject: [PATCH 243/476] drm/nouveau: fix gpuobj refcount to use atomics Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_object.c | 33 ++++++++---------------- 2 files changed, 12 
insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 71e27087951b..8ee854a4b3f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -137,6 +137,7 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_FREE (1 << 2) struct nouveau_gpuobj { struct drm_device *dev; + struct kref refcount; struct list_head list; struct drm_mm_node *im_pramin; @@ -145,7 +146,6 @@ struct nouveau_gpuobj { int im_bound; uint32_t flags; - int refcount; u32 size; u32 pinst; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 198c2514f893..02a0151b0738 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -90,7 +90,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, NV_DEBUG(dev, "gpuobj %p\n", gpuobj); gpuobj->dev = dev; gpuobj->flags = flags; - gpuobj->refcount = 1; + kref_init(&gpuobj->refcount); gpuobj->size = size; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); @@ -198,25 +198,15 @@ void nouveau_gpuobj_late_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = NULL; - struct list_head *entry, *tmp; - NV_DEBUG(dev, "\n"); - - list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { - gpuobj = list_entry(entry, struct nouveau_gpuobj, list); - - NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", - gpuobj, gpuobj->refcount); - - gpuobj->refcount = 1; - nouveau_gpuobj_ref(NULL, &gpuobj); - } + BUG_ON(!list_empty(&dev_priv->gpuobj_list)); } -static int -nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) +static void +nouveau_gpuobj_del(struct kref *ref) { + struct nouveau_gpuobj *gpuobj = + container_of(ref, struct nouveau_gpuobj, refcount); struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; @@ -242,17 +232,16 @@ nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) list_del(&gpuobj->list); kfree(gpuobj); - return 0; } void nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) { if (ref) - ref->refcount++; + kref_get(&ref->refcount); - if (*ptr && --(*ptr)->refcount == 0) - nouveau_gpuobj_del(*ptr); + if (*ptr) + kref_put(&(*ptr)->refcount, nouveau_gpuobj_del); *ptr = ref; } @@ -275,7 +264,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, NV_DEBUG(dev, "gpuobj %p\n", gpuobj); gpuobj->dev = dev; gpuobj->flags = flags; - gpuobj->refcount = 1; + kref_init(&gpuobj->refcount); gpuobj->size = size; gpuobj->pinst = pinst; gpuobj->cinst = 0xdeadbeef; @@ -561,7 +550,7 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, gpuobj->dev = chan->dev; gpuobj->engine = NVOBJ_ENGINE_SW; gpuobj->class = class; - gpuobj->refcount = 1; + kref_init(&gpuobj->refcount); gpuobj->cinst = 0x40; list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); From e05d7eaeba57921abad0ef564b0875e225171de8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:38 +1000 Subject: [PATCH 244/476] drm/nouveau: protect gpuobj list + global instmem heap with spinlock Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 +++---- drivers/gpu/drm/nouveau/nouveau_object.c | 28 ++++++++++++++++++++---- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h 
b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8ee854a4b3f5..0844f27651c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -514,11 +514,13 @@ struct drm_nouveau_private { void __iomem *mmio; + spinlock_t ramin_lock; void __iomem *ramin; u32 ramin_size; u32 ramin_base; bool ramin_available; - spinlock_t ramin_lock; + struct drm_mm ramin_heap; + struct list_head gpuobj_list; struct nouveau_bo *vga_ram; @@ -592,10 +594,6 @@ struct drm_nouveau_private { struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; - struct drm_mm ramin_heap; - - struct list_head gpuobj_list; - struct nvbios vbios; struct nv04_mode_state mode_reg; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 02a0151b0738..37615a447a4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -93,7 +93,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, kref_init(&gpuobj->refcount); gpuobj->size = size; + spin_lock(&dev_priv->ramin_lock); list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); if (chan) { NV_DEBUG(dev, "channel heap\n"); @@ -117,9 +119,22 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, } /* try and get aperture space */ - ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); - if (ramin) - ramin = drm_mm_get_block(ramin, size, align); + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, + align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; + } + + ramin = drm_mm_get_block_atomic(ramin, size, align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); /* on nv50 it's ok to fail, we have a fallback path */ if (!ramin && dev_priv->card_type < NV_50) { @@ -226,10 +241,11 @@ nouveau_gpuobj_del(struct kref *ref) if (gpuobj->im_backing) engine->instmem.clear(dev, gpuobj); + spin_lock(&dev_priv->ramin_lock); if (gpuobj->im_pramin) drm_mm_put_block(gpuobj->im_pramin); - list_del(&gpuobj->list); + spin_unlock(&dev_priv->ramin_lock); kfree(gpuobj); } @@ -276,7 +292,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, dev_priv->engine.instmem.flush(dev); } + spin_lock(&dev_priv->ramin_lock); list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); *pgpuobj = gpuobj; return 0; } @@ -553,7 +571,9 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, kref_init(&gpuobj->refcount); gpuobj->cinst = 0x40; + spin_lock(&dev_priv->ramin_lock); list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); *gpuobj_ret = gpuobj; return 0; } From 185abeccabe5aed0ef79e8572c3916ea08034a45 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 1 Sep 2010 15:24:39 +1000 Subject: [PATCH 245/476] drm/nouveau: remove nouveau_gpuobj_late_takedown Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 - drivers/gpu/drm/nouveau/nouveau_object.c | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 0844f27651c0..dc90fd2861b7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -741,7 +741,6 @@ extern void 
nouveau_channel_free(struct nouveau_channel *); extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); -extern void nouveau_gpuobj_late_takedown(struct drm_device *); extern int nouveau_gpuobj_suspend(struct drm_device *dev); extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); extern void nouveau_gpuobj_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 37615a447a4a..896cf8634144 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -207,16 +207,11 @@ nouveau_gpuobj_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; NV_DEBUG(dev, "\n"); -} - -void -nouveau_gpuobj_late_takedown(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; BUG_ON(!list_empty(&dev_priv->gpuobj_list)); } + static void nouveau_gpuobj_del(struct kref *ref) { From 2941482ead0b02c9efd81fc3862be3ebfce607a5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Sep 2010 10:25:02 +1000 Subject: [PATCH 246/476] drm/nouveau: protect ramht_find() from oopsing if on channel without ramht This doesn't actually happen now, but there's a test case for an earlier kernel where a GPU error is signalled on one of nv50's fake channels, and the ramht lookup by the IRQ handler triggered an oops. This adds a check for RAMHT's existence on a channel before looking up an object handle. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_ramht.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index de34b6bb059f..f240ba241943 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -197,6 +197,9 @@ nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) struct nouveau_gpuobj *gpuobj = NULL; unsigned long flags; + if (unlikely(!chan->ramht)) + return NULL; + spin_lock_irqsave(&ramht->lock, flags); list_for_each_entry(entry, &chan->ramht->entries, head) { if (entry->channel == chan && entry->handle == handle) { From e69b4418825c2e4c6563ae1d69bd75377826e263 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcin=20Ko=C5=9Bcielnicki?= Date: Wed, 1 Sep 2010 15:41:24 +1000 Subject: [PATCH 247/476] drm/nv50: demagic grctx, and add NVAF support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ben Skeggs Signed-off-by: Marcin Kościelnicki Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_grctx.c | 3447 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 2194 insertions(+), 1253 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index ba6c033c8c95..336aab2a24a6 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -103,6 +103,9 @@ #include "nouveau_drv.h" #include "nouveau_grctx.h" +#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) +#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) + /* * This code deals with PGRAPH contexts on NV50 family cards.
Like NV40, it's * the GPU itself that does context-switching, but it needs a special @@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: break; default: NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for " @@ -267,6 +271,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx) * registers to save/restore and the default values for them. */ +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx); + static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx) { @@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x400840, 0xffe806a8); } gr_def(ctx, 0x400844, 0x00000002); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + if (IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x400894, 0x00001000); gr_def(ctx, 0x4008e8, 0x00000003); gr_def(ctx, 0x4008ec, 0x00001000); @@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset >= 0xa0) cp_ctx(ctx, 0x400b00, 0x1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + if (IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x400b10, 0x1); gr_def(ctx, 0x400b10, 0x0001629d); cp_ctx(ctx, 0x400b20, 0x1); gr_def(ctx, 0x400b20, 0x0001629d); } + nv50_graph_construct_mmio_ddata(ctx); + /* 0C00: VFETCH */ cp_ctx(ctx, 0x400c08, 0x2); gr_def(ctx, 0x400c08, 0x0000fe0c); @@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { cp_ctx(ctx, 0x401008, 0x4); gr_def(ctx, 0x401014, 0x00001000); - } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x401008, 0x5); gr_def(ctx, 0x401018, 0x00001000); } else { @@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa3: case 0xa5: case 0xa8: + case 0xaf: gr_def(ctx, 0x401c00, 0x142500df); break; } + /* 2000 */ + /* 2400 */ cp_ctx(ctx, 0x402400, 0x1); if (dev_priv->chipset == 0x50) @@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x402408, 0x2); gr_def(ctx, 0x402408, 0x00000600); - /* 2800 */ + /* 2800: CSCHED */ cp_ctx(ctx, 0x402800, 0x1); if (dev_priv->chipset == 0x50) gr_def(ctx, 0x402800, 0x00000006); - /* 2C00 */ + /* 2C00: ZCULL */ cp_ctx(ctx, 0x402c08, 0x6); if (dev_priv->chipset != 0x50) gr_def(ctx, 0x402c14, 0x01000000); @@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x402ca0, 0x2); if (dev_priv->chipset < 0xa0) gr_def(ctx, 0x402ca0, 0x00000400); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x402ca0, 0x00000800); else gr_def(ctx, 0x402ca0, 0x00000400); cp_ctx(ctx, 0x402cac, 0x4); - /* 3000 */ + /* 3000: ENG2D */ cp_ctx(ctx, 0x403004, 0x1); gr_def(ctx, 0x403004, 0x00000001); - /* 3404 */ + /* 3400 */ if (dev_priv->chipset >= 0xa0) { cp_ctx(ctx, 0x403404, 0x1); gr_def(ctx, 0x403404, 0x00000001); } - /* 5000 */ + /* 5000: CCACHE */ cp_ctx(ctx, 0x405000, 0x1); switch (dev_priv->chipset) { case 0x50: @@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: gr_def(ctx, 0x405000, 0x000e0080); break; case 0x86: @@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x405024, 0x1); cp_ctx(ctx, 0x40502c, 0x1); - /* 5400 or maybe 4800 */ - if (dev_priv->chipset == 0x50) { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xea); - } else if (dev_priv->chipset < 0x94) { - offset = 0x405400; - 
cp_ctx(ctx, 0x405400, 0xcb); - } else if (dev_priv->chipset < 0xa0) { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xcc); - } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - offset = 0x404800; - cp_ctx(ctx, 0x404800, 0xda); - } else { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xd4); - } - gr_def(ctx, offset + 0x0c, 0x00000002); - gr_def(ctx, offset + 0x10, 0x00000001); - if (dev_priv->chipset >= 0x94) - offset += 4; - gr_def(ctx, offset + 0x1c, 0x00000001); - gr_def(ctx, offset + 0x20, 0x00000100); - gr_def(ctx, offset + 0x38, 0x00000002); - gr_def(ctx, offset + 0x3c, 0x00000001); - gr_def(ctx, offset + 0x40, 0x00000001); - gr_def(ctx, offset + 0x50, 0x00000001); - gr_def(ctx, offset + 0x54, 0x003fffff); - gr_def(ctx, offset + 0x58, 0x00001fff); - gr_def(ctx, offset + 0x60, 0x00000001); - gr_def(ctx, offset + 0x64, 0x00000001); - gr_def(ctx, offset + 0x6c, 0x00000001); - gr_def(ctx, offset + 0x70, 0x00000001); - gr_def(ctx, offset + 0x74, 0x00000001); - gr_def(ctx, offset + 0x78, 0x00000004); - gr_def(ctx, offset + 0x7c, 0x00000001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x80, 0x00000001); - gr_def(ctx, offset + 0x84, 0x00000001); - gr_def(ctx, offset + 0x88, 0x00000007); - gr_def(ctx, offset + 0x8c, 0x00000001); - gr_def(ctx, offset + 0x90, 0x00000007); - gr_def(ctx, offset + 0x94, 0x00000001); - gr_def(ctx, offset + 0x98, 0x00000001); - gr_def(ctx, offset + 0x9c, 0x00000001); - if (dev_priv->chipset == 0x50) { - gr_def(ctx, offset + 0xb0, 0x00000001); - gr_def(ctx, offset + 0xb4, 0x00000001); - gr_def(ctx, offset + 0xbc, 0x00000001); - gr_def(ctx, offset + 0xc0, 0x0000000a); - gr_def(ctx, offset + 0xd0, 0x00000040); - gr_def(ctx, offset + 0xd8, 0x00000002); - gr_def(ctx, offset + 0xdc, 0x00000100); - gr_def(ctx, offset + 0xe0, 0x00000001); - gr_def(ctx, offset + 0xe4, 0x00000100); - gr_def(ctx, offset + 0x100, 0x00000001); - gr_def(ctx, offset + 0x124, 0x00000004); - gr_def(ctx, offset + 0x13c, 0x00000001); - gr_def(ctx, offset + 0x140, 0x00000100); - gr_def(ctx, offset + 0x148, 0x00000001); - gr_def(ctx, offset + 0x154, 0x00000100); - gr_def(ctx, offset + 0x158, 0x00000001); - gr_def(ctx, offset + 0x15c, 0x00000100); - gr_def(ctx, offset + 0x164, 0x00000001); - gr_def(ctx, offset + 0x170, 0x00000100); - gr_def(ctx, offset + 0x174, 0x00000001); - gr_def(ctx, offset + 0x17c, 0x00000001); - gr_def(ctx, offset + 0x188, 0x00000002); - gr_def(ctx, offset + 0x190, 0x00000001); - gr_def(ctx, offset + 0x198, 0x00000001); - gr_def(ctx, offset + 0x1ac, 0x00000003); - offset += 0xd0; - } else { - gr_def(ctx, offset + 0xb0, 0x00000001); - gr_def(ctx, offset + 0xb4, 0x00000100); - gr_def(ctx, offset + 0xbc, 0x00000001); - gr_def(ctx, offset + 0xc8, 0x00000100); - gr_def(ctx, offset + 0xcc, 0x00000001); - gr_def(ctx, offset + 0xd0, 0x00000100); - gr_def(ctx, offset + 0xd8, 0x00000001); - gr_def(ctx, offset + 0xe4, 0x00000100); - } - gr_def(ctx, offset + 0xf8, 0x00000004); - gr_def(ctx, offset + 0xfc, 0x00000070); - gr_def(ctx, offset + 0x100, 0x00000080); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x114, 0x0000000c); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x11c, 0x00000008); - gr_def(ctx, offset + 0x120, 0x00000014); - if (dev_priv->chipset == 0x50) { - gr_def(ctx, offset + 0x124, 0x00000026); - offset -= 0x18; - } else { - gr_def(ctx, offset + 0x128, 0x00000029); - gr_def(ctx, offset + 0x12c, 0x00000027); - gr_def(ctx, offset + 0x130, 
0x00000026); - gr_def(ctx, offset + 0x134, 0x00000008); - gr_def(ctx, offset + 0x138, 0x00000004); - gr_def(ctx, offset + 0x13c, 0x00000027); - } - gr_def(ctx, offset + 0x148, 0x00000001); - gr_def(ctx, offset + 0x14c, 0x00000002); - gr_def(ctx, offset + 0x150, 0x00000003); - gr_def(ctx, offset + 0x154, 0x00000004); - gr_def(ctx, offset + 0x158, 0x00000005); - gr_def(ctx, offset + 0x15c, 0x00000006); - gr_def(ctx, offset + 0x160, 0x00000007); - gr_def(ctx, offset + 0x164, 0x00000001); - gr_def(ctx, offset + 0x1a8, 0x000000cf); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x1d8, 0x00000080); - gr_def(ctx, offset + 0x1dc, 0x00000004); - gr_def(ctx, offset + 0x1e0, 0x00000004); - if (dev_priv->chipset == 0x50) - offset -= 4; - else - gr_def(ctx, offset + 0x1e4, 0x00000003); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - gr_def(ctx, offset + 0x1ec, 0x00000003); - offset += 8; - } - gr_def(ctx, offset + 0x1e8, 0x00000001); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x1f4, 0x00000012); - gr_def(ctx, offset + 0x1f8, 0x00000010); - gr_def(ctx, offset + 0x1fc, 0x0000000c); - gr_def(ctx, offset + 0x200, 0x00000001); - gr_def(ctx, offset + 0x210, 0x00000004); - gr_def(ctx, offset + 0x214, 0x00000002); - gr_def(ctx, offset + 0x218, 0x00000004); - if (dev_priv->chipset >= 0xa0) - offset += 4; - gr_def(ctx, offset + 0x224, 0x003fffff); - gr_def(ctx, offset + 0x228, 0x00001fff); - if (dev_priv->chipset == 0x50) - offset -= 0x20; - else if (dev_priv->chipset >= 0xa0) { - gr_def(ctx, offset + 0x250, 0x00000001); - gr_def(ctx, offset + 0x254, 0x00000001); - gr_def(ctx, offset + 0x258, 0x00000002); - offset += 0x10; - } - gr_def(ctx, offset + 0x250, 0x00000004); - gr_def(ctx, offset + 0x254, 0x00000014); - gr_def(ctx, offset + 0x258, 0x00000001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x264, 0x00000002); - if (dev_priv->chipset >= 0xa0) - offset += 8; - gr_def(ctx, offset + 0x270, 0x00000001); - gr_def(ctx, offset + 0x278, 0x00000002); - gr_def(ctx, offset + 0x27c, 0x00001000); - if (dev_priv->chipset == 0x50) - offset -= 0xc; - else { - gr_def(ctx, offset + 0x280, 0x00000e00); - gr_def(ctx, offset + 0x284, 0x00001000); - gr_def(ctx, offset + 0x288, 0x00001e00); - } - gr_def(ctx, offset + 0x290, 0x00000001); - gr_def(ctx, offset + 0x294, 0x00000001); - gr_def(ctx, offset + 0x298, 0x00000001); - gr_def(ctx, offset + 0x29c, 0x00000001); - gr_def(ctx, offset + 0x2a0, 0x00000001); - gr_def(ctx, offset + 0x2b0, 0x00000200); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - gr_def(ctx, offset + 0x2b4, 0x00000200); - offset += 4; - } - if (dev_priv->chipset < 0xa0) { - gr_def(ctx, offset + 0x2b8, 0x00000001); - gr_def(ctx, offset + 0x2bc, 0x00000070); - gr_def(ctx, offset + 0x2c0, 0x00000080); - gr_def(ctx, offset + 0x2cc, 0x00000001); - gr_def(ctx, offset + 0x2d0, 0x00000070); - gr_def(ctx, offset + 0x2d4, 0x00000080); - } else { - gr_def(ctx, offset + 0x2b8, 0x00000001); - gr_def(ctx, offset + 0x2bc, 0x000000f0); - gr_def(ctx, offset + 0x2c0, 0x000000ff); - gr_def(ctx, offset + 0x2cc, 0x00000001); - gr_def(ctx, offset + 0x2d0, 0x000000f0); - gr_def(ctx, offset + 0x2d4, 0x000000ff); - gr_def(ctx, offset + 0x2dc, 0x00000009); - offset += 4; - } - gr_def(ctx, offset + 0x2e4, 0x00000001); - gr_def(ctx, offset + 0x2e8, 0x000000cf); - gr_def(ctx, offset + 0x2f0, 0x00000001); - gr_def(ctx, offset + 0x300, 0x000000cf); - gr_def(ctx, offset + 0x308, 0x00000002); - gr_def(ctx, 
offset + 0x310, 0x00000001); - gr_def(ctx, offset + 0x318, 0x00000001); - gr_def(ctx, offset + 0x320, 0x000000cf); - gr_def(ctx, offset + 0x324, 0x000000cf); - gr_def(ctx, offset + 0x328, 0x00000001); - /* 6000? */ if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x4063e0, 0x1); @@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x406818, 0x00000f80); else gr_def(ctx, 0x406818, 0x00001f80); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + if (IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x40681c, 0x00000030); cp_ctx(ctx, 0x406830, 0x3); } @@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) cp_ctx(ctx, 0x407094 + (i<<8), 1); - else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) cp_ctx(ctx, 0x407094 + (i<<8), 3); else { cp_ctx(ctx, 0x407094 + (i<<8), 4); @@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: gr_def(ctx, offset + 0x1c, 0x300c0000); break; } @@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, base + 0x304, 0x00007070); else if (dev_priv->chipset < 0xa0) gr_def(ctx, base + 0x304, 0x00027070); - else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, base + 0x304, 0x01127070); else gr_def(ctx, base + 0x304, 0x05127070); @@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { cp_ctx(ctx, base + 0x340, 9); offset = base + 0x340; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, base + 0x33c, 0xb); offset = base + 0x344; } else { @@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, offset + 0x0, 0x000001f0); gr_def(ctx, offset + 0x4, 0x00000001); gr_def(ctx, offset + 0x8, 0x00000003); - if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa) + if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset)) gr_def(ctx, offset + 0xc, 0x00008000); gr_def(ctx, offset + 0x14, 0x00039e00); cp_ctx(ctx, offset + 0x1c, 2); @@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset >= 0xa0) { cp_ctx(ctx, base + 0x54c, 2); - if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, base + 0x54c, 0x003fe006); else gr_def(ctx, base + 0x54c, 0x003fe007); @@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) } } +static void +dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { + int i; + if (val && ctx->mode == NOUVEAU_GRCTX_VALS) + for (i = 0; i < num; i++) + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); + ctx->ctxvals_pos += num; +} + +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int base, num; + base = ctx->ctxvals_pos; + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK135C */ + + /* SRC_TIC state */ + dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */ + dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */ + if (dev_priv->chipset >= 0x94) + dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */ + dd_emit(ctx, 1, 1); /* 00000fff 
SRC_DEPTH */ + dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */ + + /* turing state */ + dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */ + dd_emit(ctx, 1, 1); /* 00000001 LANES32 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK370 */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */ + dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */ + dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */ + dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */ + dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */ + dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */ + dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */ + dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */ + + /* compat 2d state */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */ + + dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */ + + dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */ + dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */ + + dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */ + dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */ + dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */ + dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */ + dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */ + + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */ + dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */ + + dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */ + dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc 
LUT_OFFSET */ + dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */ + } + + /* m2mf state */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */ + dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */ + + /* more compat 2d state */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */ + + dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */ + + dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */ + dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */ + dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */ + dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */ + dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */ + dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */ + } + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */ + dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */ + dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) { + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ + } else { + dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ + } + dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ + dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */ + dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 0); /* 00000001 */ + } else { + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */ + dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */ + dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */ + dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 8); /* 0000000f SMENATIC_CLIP.CLIP_HIGH */ + dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */ + dd_emit(ctx, 1, 
0x27); /* 000000ff UNK0FD4 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */ + } + dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */ + dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */ + dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */ + dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */ + dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */ + dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */ + dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */ + dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */ + dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */ + dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */ + dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */ + dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 3, 0); /* 1, 1, 1 */ + else + dd_emit(ctx, 2, 0); /* 1, 1 */ + dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */ + dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/ + dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + if (IS_NVA3F(dev_priv->chipset)) { + dd_emit(ctx, 1, 3); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */ + } + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */ + dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */ + dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */ + dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */ + dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */ + dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */ + dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */ + dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */ + dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 8, 0); /* 00000001 */ + if (dev_priv->chipset >= 0xa0) { + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */ + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */ + dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */ + dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */ + } + dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */ + dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */ + dd_emit(ctx, 1, 0); 
/* 00000001 FRONT_FACE */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */ + dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */ + if (dev_priv->chipset != 0x50) { + dd_emit(ctx, 1, 0xe00); /* 7fff */ + dd_emit(ctx, 1, 0x1000); /* 7fff */ + dd_emit(ctx, 1, 0x1e00); /* 7fff */ + } + dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */ + dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */ + dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */ + dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */ + dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0x200); + dd_emit(ctx, 1, 0); /* 00000001 */ + if (dev_priv->chipset < 0xa0) { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + } else { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */ + } + + /* eng2d state */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */ + dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */ + dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */ + + num = ctx->ctxvals_pos - base; + ctx->ctxvals_pos = base; + if (IS_NVA3F(dev_priv->chipset)) + cp_ctx(ctx, 0x404800, num); + else + cp_ctx(ctx, 0x405400, num); +} + /* * xfer areas. These are a pain. * @@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) * without the help of ctxprog. 
*/ -static inline void +static void xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { int i; if (val && ctx->mode == NOUVEAU_GRCTX_VALS) for (i = 0; i < num; i++) - nv_wo32(ctx->data, (ctx->ctxvals_pos + (i << 3))*4, val); + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); ctx->ctxvals_pos += num << 3; } /* Gene declarations... */ +static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx); static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx); static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); @@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { /* Strand 0 */ ctx->ctxvals_pos = offset; - switch (dev_priv->chipset) { - case 0x50: - xf_emit(ctx, 0x99, 0); - break; - case 0x84: - case 0x86: - xf_emit(ctx, 0x384, 0); - break; - case 0x92: - case 0x94: - case 0x96: - case 0x98: - xf_emit(ctx, 0x380, 0); - break; - } - nv50_graph_construct_gene_m2mf (ctx); - switch (dev_priv->chipset) { - case 0x50: - case 0x84: - case 0x86: - case 0x98: - xf_emit(ctx, 0x4c4, 0); - break; - case 0x92: - case 0x94: - case 0x96: - xf_emit(ctx, 0x984, 0); - break; - } - nv50_graph_construct_gene_unk5(ctx); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0xa, 0); - else - xf_emit(ctx, 0xb, 0); - nv50_graph_construct_gene_unk4(ctx); - nv50_graph_construct_gene_unk3(ctx); + nv50_graph_construct_gene_dispatch(ctx); + nv50_graph_construct_gene_m2mf(ctx); + nv50_graph_construct_gene_unk24xx(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_zcull(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 1 */ ctx->ctxvals_pos = offset + 0x1; - 
nv50_graph_construct_gene_unk6(ctx); - nv50_graph_construct_gene_unk7(ctx); - nv50_graph_construct_gene_unk8(ctx); - switch (dev_priv->chipset) { - case 0x50: - case 0x92: - xf_emit(ctx, 0xfb, 0); - break; - case 0x84: - xf_emit(ctx, 0xd3, 0); - break; - case 0x94: - case 0x96: - xf_emit(ctx, 0xab, 0); - break; - case 0x86: - case 0x98: - xf_emit(ctx, 0x6b, 0); - break; - } - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 2, 0x4e3bfdf); + nv50_graph_construct_gene_vfetch(ctx); + nv50_graph_construct_gene_eng2d(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_ropm1(ctx); + nv50_graph_construct_gene_ropm2(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 2 */ ctx->ctxvals_pos = offset + 0x2; - switch (dev_priv->chipset) { - case 0x50: - case 0x92: - xf_emit(ctx, 0xa80, 0); - break; - case 0x84: - xf_emit(ctx, 0xa7e, 0); - break; - case 0x94: - case 0x96: - xf_emit(ctx, 0xa7c, 0); - break; - case 0x86: - case 0x98: - xf_emit(ctx, 0xa7a, 0); - break; - } - xf_emit(ctx, 1, 0x3fffff); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fff); - xf_emit(ctx, 0xe, 0); - nv50_graph_construct_gene_unk9(ctx); - nv50_graph_construct_gene_unk2(ctx); - nv50_graph_construct_gene_unk1(ctx); - nv50_graph_construct_gene_unk10(ctx); + nv50_graph_construct_gene_ccache(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); + nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk10xx(ctx); + nv50_graph_construct_gene_unk34xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; @@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) } else { /* Strand 0 */ ctx->ctxvals_pos = offset; - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x385, 0); - else - xf_emit(ctx, 0x384, 0); + nv50_graph_construct_gene_dispatch(ctx); nv50_graph_construct_gene_m2mf(ctx); - xf_emit(ctx, 0x950, 0); - nv50_graph_construct_gene_unk10(ctx); - xf_emit(ctx, 1, 0x0fac6881); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - } - nv50_graph_construct_gene_unk8(ctx); - if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0x189, 0); - else if (dev_priv->chipset == 0xa3) - xf_emit(ctx, 0xd5, 0); - else if (dev_priv->chipset == 0xa5) - xf_emit(ctx, 0x99, 0); - else if (dev_priv->chipset == 0xaa) - xf_emit(ctx, 0x65, 0); - else - xf_emit(ctx, 0x6d, 0); - nv50_graph_construct_gene_unk9(ctx); + nv50_graph_construct_gene_unk34xx(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 1 */ ctx->ctxvals_pos = offset + 1; - nv50_graph_construct_gene_unk1(ctx); + nv50_graph_construct_gene_unk10xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 2 */ ctx->ctxvals_pos = offset + 2; - if (dev_priv->chipset == 0xa0) { - nv50_graph_construct_gene_unk2(ctx); - } - xf_emit(ctx, 0x36, 0); - nv50_graph_construct_gene_unk5(ctx); + if (dev_priv->chipset == 0xa0) + nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk24xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 3 */ ctx->ctxvals_pos = offset + 3; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - nv50_graph_construct_gene_unk6(ctx); + 
nv50_graph_construct_gene_vfetch(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 4 */ ctx->ctxvals_pos = offset + 4; - if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0xa80, 0); - else if (dev_priv->chipset == 0xa3) - xf_emit(ctx, 0xa7c, 0); - else - xf_emit(ctx, 0xa7a, 0); - xf_emit(ctx, 1, 0x3fffff); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fff); + nv50_graph_construct_gene_ccache(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 5 */ ctx->ctxvals_pos = offset + 5; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); + nv50_graph_construct_gene_ropm2(ctx); + nv50_graph_construct_gene_ropm1(ctx); + /* per-ROP context */ for (i = 0; i < 8; i++) if (units & (1<<(i+16))) nv50_graph_construct_gene_ropc(ctx); @@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) /* Strand 6 */ ctx->ctxvals_pos = offset + 6; - nv50_graph_construct_gene_unk3(ctx); - xf_emit(ctx, 0xb, 0); - nv50_graph_construct_gene_unk4(ctx); - nv50_graph_construct_gene_unk7(ctx); + nv50_graph_construct_gene_zcull(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_eng2d(ctx); if (units & (1 << 0)) nv50_graph_construct_xfer_tp(ctx); if (units & (1 << 1)) @@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) if (units & (1 << 9)) nv50_graph_construct_xfer_tp(ctx); } else { - nv50_graph_construct_gene_unk2(ctx); + nv50_graph_construct_gene_unk14xx(ctx); } if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; @@ -1289,10 +1323,71 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) * non-trivial demagiced parts of ctx init go here */ +static void +nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx) +{ + /* start of strand 0 */ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* SEEK */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 5, 0); + else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 6, 0); + else + xf_emit(ctx, 4, 0); + /* SEEK */ + /* the PGRAPH's internal FIFO */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 8*3, 0); + else + xf_emit(ctx, 0x100*3, 0); + /* and another bonus slot?!? */ + xf_emit(ctx, 3, 0); + /* and YET ANOTHER bonus slot? */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 3, 0); + /* SEEK */ + /* CTX_SWITCH: caches of gr objects bound to subchannels. 
8 values, last used index */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + if (dev_priv->chipset < 0x90) + xf_emit(ctx, 4, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0x1c, 0); + else if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x1e, 0); + else + xf_emit(ctx, 0x22, 0); + /* SEEK */ + xf_emit(ctx, 0x15, 0); +} + static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) { - /* m2mf state */ + /* Strand 0, right after dispatch */ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int smallm2mf = 0; + if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98) + smallm2mf = 1; + /* SEEK */ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ @@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ + /* SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */ + else + xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */ + xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */ + /* SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x400, 0); /* ffffffff */ + else + xf_emit(ctx, 0x800, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */ + xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */ } static void -nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* RO */ + xf_emit(ctx, 0x800, 0); /* ffffffff */ + switch (dev_priv->chipset) { + case 0x50: + case 0x92: + case 0xa0: + xf_emit(ctx, 0x2b, 0); + break; + case 0x84: + xf_emit(ctx, 0x29, 0); + break; + case 0x94: + case 0x96: + case 0xa3: + xf_emit(ctx, 0x27, 0); + break; + case 0x86: + case 0x98: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + xf_emit(ctx, 0x25, 0); + break; + } + /* CB bindings, 0x80 of them. first word is address >> 8, second is + * size >> 4 | valid << 24 */ + xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */ + xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */ + xf_emit(ctx, 1, 0); /* 0 */ + xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */ + xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */ + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0x100, 0); /* ffffffff */ + xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... 
*/ + xf_emit(ctx, 8, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */ +} + +static void +nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; /* end of area 2 on pre-NVA0, area 1 on NVAx */ - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 1, 0x3ff); else - xf_emit(ctx, 1, 0x7ff); - switch (dev_priv->chipset) { - case 0x50: - case 0x86: - case 0x98: - case 0xaa: - case 0xac: - xf_emit(ctx, 0x542, 0); - break; - case 0x84: - case 0x92: - case 0x94: - case 0x96: - xf_emit(ctx, 0x942, 0); - break; - case 0xa0: - case 0xa3: - xf_emit(ctx, 0x2042, 0); - break; - case 0xa5: - case 0xa8: - xf_emit(ctx, 0x842, 0); - break; + xf_emit(ctx, 1, 0x7ff); /* 000007ff */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + for (i = 0; i < 8; i++) { + switch (dev_priv->chipset) { + case 0x50: + case 0x86: + case 0x98: + case 0xaa: + case 0xac: + xf_emit(ctx, 0xa0, 0); /* ffffffff */ + break; + case 0x84: + case 0x92: + case 0x94: + case 0x96: + xf_emit(ctx, 0x120, 0); + break; + case 0xa5: + case 0xa8: + xf_emit(ctx, 0x100, 0); /* ffffffff */ + break; + case 0xa0: + case 0xa3: + case 0xaf: + xf_emit(ctx, 0x400, 0); /* ffffffff */ + break; + } + xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ } - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x27); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 
000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ } static void -nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx) { + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; /* end of area 2 on pre-NVA0, area 1 on NVAx */ - xf_emit(ctx, 0x10, 0x04000000); - xf_emit(ctx, 0x24, 0); - xf_emit(ctx, 2, 0x04e3bfdf); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fe21); + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ + xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + } } static void -nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ if (dev_priv->chipset != 0x50) { - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x804); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 5, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 2, 4); /* 7f, ff */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ } - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 3, 0); - else - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x804); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 0x7f); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 6, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); - else - xf_emit(ctx, 1, 0x7ff); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 0x38, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 
0x38, 0); - xf_emit(ctx, 2, 0x88); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 0x16, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x3f800000); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ if (dev_priv->chipset != 0x50) - xf_emit(ctx, 0x28, 0); - else - xf_emit(ctx, 0x25, 0); - xf_emit(ctx, 1, 0x52); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */ - xf_emit(ctx, 1, 0x3f); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0x10, 0); + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ else - xf_emit(ctx, 0x11, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x1001); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 0x20, 0); - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 1, 0x10); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0); - else - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 2, 0); -} - -static void -nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx) -{ - /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */ - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */ - xf_emit(ctx, 2, 4); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x1c4d, 0); - else - xf_emit(ctx, 0x1c4b, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x8100c12); - if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 3); - xf_emit(ctx, 1, 0); - 
xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0x27); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x3c1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x16, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */ - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 8, 0); - else - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x20); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x11, 0); - else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0xf, 0); - else - xf_emit(ctx, 0xe, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 0xd, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 8); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); - else - xf_emit(ctx, 1, 0x7ff); - if (dev_priv->chipset == 0xa8) - xf_emit(ctx, 1, 0x1e00); - xf_emit(ctx, 0xc, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0x125, 0); - else if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x126, 0); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0x124, 0); - else - xf_emit(ctx, 0x1f7, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 3, 0); - else - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xa1, 0); - else - xf_emit(ctx, 0x5a, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x834, 0); - else if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0x1873, 0); - else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x8ba, 0); - else - xf_emit(ctx, 0x833, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 0xf, 0); -} - -static void -nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */ - xf_emit(ctx, 2, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0x100); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 8); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 0xcf); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x15); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x4444480); - xf_emit(ctx, 0x37, 0); -} - -static void -nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx) -{ - /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */ - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 2); -} 
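For illustration only (this is not code from the patch): the xf_emit() hunk above merely reorders the operands of the multiplication in the nv_wo32() offset, and the layout it produces is unchanged — each emitted value occupies every 8th 32-bit word starting at ctx->ctxvals_pos, which keeps the per-strand streams selected via ctxvals_pos = offset + N interleaved, and the cursor advances by num << 3 whether or not anything was written. A minimal stand-alone sketch of that stride-8 layout follows; mock_ctx and mock_emit are hypothetical stand-ins, a plain array replaces ctx->data, and the NOUVEAU_GRCTX_VALS mode check is dropped for brevity.

#include <stdint.h>
#include <stdio.h>

struct mock_ctx {
	uint32_t *data;	/* stands in for ctx->data (the ctxvals image) */
	int pos;	/* stands in for ctx->ctxvals_pos, in 32-bit words */
};

/* Same stride-8 pattern as xf_emit(): values land at pos, pos+8, pos+16, ... */
static void mock_emit(struct mock_ctx *c, int num, uint32_t val)
{
	int i;

	if (val)
		for (i = 0; i < num; i++)
			c->data[c->pos + (i << 3)] = val;
	c->pos += num << 3;	/* advances even when val == 0 */
}

int main(void)
{
	static uint32_t vals[64];
	struct mock_ctx c = { vals, 0 };

	mock_emit(&c, 2, 0x8100c12);	/* two values -> words 0 and 8 */
	mock_emit(&c, 1, 0x3f800000);	/* next value -> word 16 */
	printf("%08x %08x %08x, next pos %d\n",
	       vals[0], vals[8], vals[16], c.pos);
	return 0;
}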
- -static void -nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */ - xf_emit(ctx, 1, 0x3f800000); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x12, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 0xf, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 3); - else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 5); - xf_emit(ctx, 1, 0x52); - if (dev_priv->chipset == 0x50) { - xf_emit(ctx, 0x13, 0); - } else { - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x11, 0); - else - xf_emit(ctx, 0x10, 0); - } - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 0x26, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 5); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 4, 0xffff); - if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 3); - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x1f, 0); - else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xc, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? 
*/ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ if (dev_priv->chipset != 0x50) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } + xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */ + xf_emit(ctx, 1, 0); /* f */ + xf_emit(ctx, 1, 0); /* 0? 
*/ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 003fffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 0000000f */ +} + +static void +nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + /* SEEK */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 3); /* 
00000003 FP_CTRL_UNK196C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ +} + +static void +nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx) +{ + /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */ + /* SEEK */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */ +} + +static void +nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; + /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */ + /* SEEK */ + xf_emit(ctx, 0x33, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ + + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ } - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x26, 0); else - xf_emit(ctx, 0x3c, 0); - xf_emit(ctx, 1, 0x102); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 4, 4); + { + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? 
*/ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 */ + /* SEEK */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 8, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); + xf_emit(ctx, 2, 4); /* 000000ff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 1); /* 00000001 */ + for (i = 0; i < 10; i++) { + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff */ + xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */ + xf_emit(ctx, 0x10, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */ + xf_emit(ctx, 1, 1); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 000003ff */ +} + +static void +nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int acnt = 0x10, rep, i; + /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */ + if (IS_NVA3F(dev_priv->chipset)) + acnt = 0x20; + /* SEEK */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ + xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ + } + xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */ + xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */ + xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? 
*/ + xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0x9, 0); /* RO */ else - xf_emit(ctx, 1, 0x7ff); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x102); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 4, 4); - xf_emit(ctx, 0x2c, 0); + xf_emit(ctx, 0x8, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ + xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ + if (dev_priv->chipset == 0xa8) + xf_emit(ctx, 1, 0x1e00); /* 7fff */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO or close */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0) + xf_emit(ctx, 2, 0); /* ffffffff */ + else + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 0x10, 0); /* 0? */ + xf_emit(ctx, 2, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } else { + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 1, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ + xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ + xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* RO */ + xf_emit(ctx, 2, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? 
*/ + xf_emit(ctx, 1, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, acnt, 0); /* f */ + xf_emit(ctx, 3, 0); /* f/1f */ + } + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 2, 0); /* RO */ + else + xf_emit(ctx, 5, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ + /* SEEK */ + if (dev_priv->chipset < 0xa0) { + xf_emit(ctx, 0x41, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0x11, 0); /* RO */ + } else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x50, 0); /* RO */ + else + xf_emit(ctx, 0x58, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 1); /* 1 UNK0DEC */ + /* SEEK */ + xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ + xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x1d, 0); /* RO */ + else + xf_emit(ctx, 0x16, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + /* SEEK */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 8, 0); /* RO */ + else if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0xc, 0); /* RO */ + else + xf_emit(ctx, 7, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xa, 0); /* RO */ + if (dev_priv->chipset == 0xa0) + rep = 0xc; + else + rep = 4; + for (i = 0; i < rep; i++) { + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x20, 0); /* ffffffff */ + xf_emit(ctx, 0x200, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 113/111 */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 7, 0); /* weird... */ + else + xf_emit(ctx, 5, 0); /* weird... */ +} + +static void +nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */ + xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */ + xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */ + if (dev_priv->chipset < 0xa0) { + /* this is useless on everything but the original NV50, + * guess they forgot to nuke it. Or just didn't bother. 
*/ + xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */ + xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */ + xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */ + } + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */ + xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */ + xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 00000001 UNK888 */ + xf_emit(ctx, 1, 4); /* 0000003f UNK884 */ + xf_emit(ctx, 1, 0); /* 00000007 UNK880 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */ + xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */ + xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK260 */ + xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */ + /* SEEK */ + xf_emit(ctx, 0x10, 0); + /* SEEK */ + xf_emit(ctx, 0x27, 0); +} + +static void +nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? 
*/ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 000003ff */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */ + xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */ + xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */ + xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */ + switch (dev_priv->chipset) { + case 0x50: + case 0x92: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x80, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */ + break; + case 0x84: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0x94: + case 0x96: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x40, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */ + break; + case 0x86: + case 0x98: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + case 0xa0: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0xf0, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */ + break; + case 0xa3: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0xa5: + case 0xaf: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... 
*/ + xf_emit(ctx, 0x30, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */ + break; + case 0xaa: + xf_emit(ctx, 0x12, 0); + break; + case 0xa8: + case 0xac: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + } + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff */ +} + +static void +nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */ + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */ + } + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + 
xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x1c, 0); /* RO */ + else if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x9, 0); + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + xf_emit(ctx, 1, 0); /* 3ff */ + } + /* XXX: the following block could belong either to unk1cxx, or + * to STRMOUT. Rather hard to tell. */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x25, 0); + else + xf_emit(ctx, 0x3b, 0); +} + +static void +nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ + xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000? 
*/ + xf_emit(ctx, 2, 0); /* ffffffff */ +} + +static void +nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ +} + +static void +nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 7 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ } static void @@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) int magic2; if (dev_priv->chipset == 0x50) { magic2 = 0x00003e60; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { magic2 = 0x001ffe67; } else { magic2 = 0x00087e67; } - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 4, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 7, 0); - if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x15); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0); /* f/7 MUTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset)) + xf_emit(ctx, 1, 0x15); /* 000000ff */ + 
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x400); - xf_emit(ctx, 1, 0x300); - xf_emit(ctx, 1, 0x1001); + xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0x400); /* fffffff */ + xf_emit(ctx, 1, 0x300); /* ffff */ + xf_emit(ctx, 1, 0x1001); /* 1fff */ if (dev_priv->chipset != 0xa0) { - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0); + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ else - xf_emit(ctx, 1, 0x15); + xf_emit(ctx, 1, 0x15); /* ff */ } - xf_emit(ctx, 3, 0); } - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x13, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 0x10, 0); - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 0x19, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x3f); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); 
/* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ if (dev_priv->chipset >= 0xa0) { xf_emit(ctx, 2, 0); xf_emit(ctx, 1, 0x1001); xf_emit(ctx, 0xb, 0); } else { - xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ } - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x11); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 6, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x0fac6881); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x16, 0); + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x11); /* 3f/7f */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + xf_emit(ctx, 1, 0); /* 000000ff */ + } + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0); 
/* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ } else { - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0x1b, 0); - else - xf_emit(ctx, 0x15, 0); + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); + xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ + xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ + xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? 
*/ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ } - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x5b, 0); + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */ + xf_emit(ctx, 1, 0); /* 000000ff ROP */ + xf_emit(ctx, 1, 0); /* ffffffff BETA1 */ + xf_emit(ctx, 1, 0); /* ffffffff BETA4 */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */ } static void -nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int magic3; - if (dev_priv->chipset == 0x50) + switch (dev_priv->chipset) { + case 0x50: magic3 = 0x1000; - else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) + break; + case 0x86: + case 0x98: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: magic3 = 0x1e00; - else + break; + default: magic3 = 0; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x24, 0); + } + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x1f, 0); /* ffffffff */ else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0x14, 0); + xf_emit(ctx, 0x0f, 0); /* ffffffff */ else - xf_emit(ctx, 0x15, 0); - xf_emit(ctx, 2, 4); + xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ + xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0x03020100); + xf_emit(ctx, 1, 0x03020100); /* ffffffff */ else - xf_emit(ctx, 1, 0x00608080); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ if (magic3) - xf_emit(ctx, 1, magic3); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 0x24, 0); - xf_emit(ctx, 1, 
4); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x03020100); - xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ if (magic3) - xf_emit(ctx, 1, magic3); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) - xf_emit(ctx, 0x1024, 0); + xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ else if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0xa24, 0); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0x214, 0); + xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ + else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x210, 0); /* ffffffff */ else - xf_emit(ctx, 0x414, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 2, 0); + xf_emit(ctx, 0x410, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ } static void -nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int magic1, magic2; if (dev_priv->chipset == 0x50) { magic1 = 0x3ff; magic2 = 0x00003e60; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { magic1 = 0x7ff; magic2 = 0x001ffe67; } else { magic1 = 0x7ff; magic2 = 0x00087e67; } - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xc, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - 
xf_emit(ctx, 1, 3); - xf_emit(ctx, 1, 0); - } else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0); - } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 0xcf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x11); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 1, 1); - if(dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x28, 0); - xf_emit(ctx, 8, 8); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 8, 0x400); - xf_emit(ctx, 8, 0x300); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x20); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x40); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 4, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x400); - xf_emit(ctx, 1, 0x300); - xf_emit(ctx, 1, 0x1001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 0x15, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - } else - xf_emit(ctx, 0x17, 0); - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - 
xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 0); - else if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x2a712488); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x4085c000); - xf_emit(ctx, 1, 0x40); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0x10100); - xf_emit(ctx, 1, 0x02800000); -} - -static void -nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 2, 0x04e3bfdf); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x00ffff00); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x30201000); - xf_emit(ctx, 1, 0x70605040); - xf_emit(ctx, 1, 0xb8a89888); - xf_emit(ctx, 1, 0xf8e8d8c8); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1a); -} - -static void -nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xb, 0); - else - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 
SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */ + xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ + } else if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ } else { - xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ } + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + } + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f 
COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + if(dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); /* ff */ + else + xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */ + xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */ + xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */ + xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */ + xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */ + 
xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 2, 0); /* ffff, ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */ + xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */ + xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */ + xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + if 
(IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0); /* 1 */ + xf_emit(ctx, 2, 1); /* 1 */ + xf_emit(ctx, 2, 0); /* 7, f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 1, 0); /* 7/f */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x9, 0); /* 1 */ + else + xf_emit(ctx, 0x8, 0); /* 1 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 1 */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 7, 0); /* 7f */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 1, 0xf); /* f */ + xf_emit(ctx, 7, 0); /* f */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + } +} + +static void +nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ + else + xf_emit(ctx, 2, 0); /* 3ff, 1 */ + xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */ + xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */ + xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */ + xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */ + xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ + xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ + if (dev_priv->chipset == 0x50) { + xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ + xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ + } else if (!IS_NVAAF(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? 
*/ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else { + xf_emit(ctx, 0x6, 0); + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */ +} + +static void +nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ } static void @@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; if (dev_priv->chipset < 0xa0) { - nv50_graph_construct_xfer_tp_x1(ctx); - nv50_graph_construct_xfer_tp_x2(ctx); - nv50_graph_construct_xfer_tp_x3(ctx); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0xf, 0); - else - xf_emit(ctx, 0x12, 0); - nv50_graph_construct_xfer_tp_x4(ctx); + nv50_graph_construct_xfer_unk84xx(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); } else { - nv50_graph_construct_xfer_tp_x3(ctx); - if (dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xc, 0); - else - xf_emit(ctx, 0xa, 0); - nv50_graph_construct_xfer_tp_x2(ctx); - nv50_graph_construct_xfer_tp_x5(ctx); - nv50_graph_construct_xfer_tp_x4(ctx); - nv50_graph_construct_xfer_tp_x1(ctx); + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); + nv50_graph_construct_xfer_unk84xx(ctx); } } static void -nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_mpc(struct 
nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - int i, mpcnt; - if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) - mpcnt = 1; - else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8) - mpcnt = 2; - else - mpcnt = 3; + int i, mpcnt = 2; + switch (dev_priv->chipset) { + case 0x98: + case 0xaa: + mpcnt = 1; + break; + case 0x50: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa8: + case 0xac: + mpcnt = 2; + break; + case 0xa0: + case 0xa3: + case 0xa5: + case 0xaf: + mpcnt = 3; + break; + } for (i = 0; i < mpcnt; i++) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 0x80007004); - xf_emit(ctx, 1, 0x04000400); + xf_emit(ctx, 1, 0); /* ff */ + xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ + xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ + xf_emit(ctx, 1, 0x04000400); /* ffffffff */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0xc0); - xf_emit(ctx, 1, 0x1000); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) { - xf_emit(ctx, 1, 0xe00); - xf_emit(ctx, 1, 0x1e00); + xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ + xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) { + xf_emit(ctx, 1, 0xe00); /* 7fff */ + xf_emit(ctx, 1, 0x1e00); /* 7fff */ } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 0x1000); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 2); - if (dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + if (IS_NVAAF(dev_priv->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 0xc, 0); /* RO */ else - xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 0xa, 0); /* RO */ } - xf_emit(ctx, 1, 0x08100c12); - xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* ff/3ff */ if (dev_priv->chipset >= 0xa0) { - xf_emit(ctx, 1, 0x1fe21); + xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ } - xf_emit(ctx, 5, 0); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1fe21); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x08100c12); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 3); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 8, 2); - 
xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ } - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + /* XXX: demagic this part some day */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 0x3a0, 0); else if (dev_priv->chipset < 0x94) @@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) xf_emit(ctx, 0x39f, 0); else xf_emit(ctx, 0x3a3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 7 OPERATION */ + xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */ xf_emit(ctx, 0x2d, 0); } @@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { for (i = 0; i < 8; i++) { ctx->ctxvals_pos = offset + i; + /* that little bugger 
belongs to csched. No idea + * what it's doing here. */ if (i == 0) - xf_emit(ctx, 1, 0x08100c12); + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << i)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } } else { /* Strand 0: TPs 0, 1 */ ctx->ctxvals_pos = offset; - xf_emit(ctx, 1, 0x08100c12); + /* that little bugger belongs to csched. No idea + * what it's doing here. */ + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << 0)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 1)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 2, 3 */ + /* Strand 1: TPs 2, 3 */ ctx->ctxvals_pos = offset + 1; if (units & (1 << 2)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 3)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 4, 5, 6 */ + /* Strand 2: TPs 4, 5, 6 */ ctx->ctxvals_pos = offset + 2; if (units & (1 << 4)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 5)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 6)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 7, 8, 9 */ + /* Strand 3: TPs 7, 8, 9 */ ctx->ctxvals_pos = offset + 3; if (units & (1 << 7)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 8)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 9)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } From d96773e7b61d9976b8227e018a1c94fb7374e641 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Sep 2010 15:46:58 +1000 Subject: [PATCH 248/476] drm/nv50: move vm trap to nv50_fb.c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_irq.c | 56 ++++++--------------------- drivers/gpu/drm/nouveau/nv50_fb.c | 39 +++++++++++++++++++ 3 files changed, 51 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index dc90fd2861b7..c8b990b09f5d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -903,10 +903,10 @@ extern int nv40_fb_init(struct drm_device *); extern void nv40_fb_takedown(struct drm_device *); extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); - /* nv50_fb.c */ extern int nv50_fb_init(struct drm_device *); extern void nv50_fb_takedown(struct drm_device *); +extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *); /* nvc0_fb.c */ extern int nvc0_fb_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 9cc3259a54b9..316e0587fb01 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -607,40 +607,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) nv_wr32(dev, 
NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } -static void -nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t trap[6]; - int i, ch; - uint32_t idx = nv_rd32(dev, 0x100c90); - if (idx & 0x80000000) { - idx &= 0xffffff; - if (display) { - for (i = 0; i < 6; i++) { - nv_wr32(dev, 0x100c90, idx | i << 24); - trap[i] = nv_rd32(dev, 0x100c94); - } - for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { - struct nouveau_channel *chan = dev_priv->fifos[ch]; - - if (!chan || !chan->ramin) - continue; - - if (trap[1] == chan->ramin->vinst >> 12) - break; - } - NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", - name, (trap[5]&0x100?"read":"write"), - trap[5]&0xff, trap[4]&0xffff, - trap[3]&0xffff, trap[0], trap[2], ch); - } - nv_wr32(dev, 0x100c90, idx | 0x80000000); - } else if (display) { - NV_INFO(dev, "%s - no VM fault?\n", name); - } -} - static struct nouveau_enum_names nv50_mp_exec_error_names[] = { { 3, "STACK_UNDERFLOW" }, @@ -713,7 +679,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, tps++; switch (type) { case 6: /* texture error... unknown for now */ - nv50_pfb_vm_trap(dev, display, name); + nv50_fb_vm_trap(dev, display, name); if (display) { NV_ERROR(dev, "magic set %d:\n", i); for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) @@ -736,7 +702,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); - nv50_pfb_vm_trap(dev, display, name); + nv50_fb_vm_trap(dev, display, name); /* 2d engine destination */ if (ustatus & 0x00000010) { if (display) { @@ -819,7 +785,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) /* Known to be triggered by screwed up NOTIFY and COND... 
*/ if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); nv_wr32(dev, 0x400500, 0); if (nv_rd32(dev, 0x400808) & 0x80000000) { if (display) { @@ -844,7 +810,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) ustatus &= ~0x00000001; } if (ustatus & 0x00000002) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); nv_wr32(dev, 0x400500, 0); if (nv_rd32(dev, 0x40084c) & 0x80000000) { if (display) { @@ -886,15 +852,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); ustatus &= ~0x00000001; } if (ustatus & 0x00000002) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); ustatus &= ~0x00000002; } if (ustatus & 0x00000004) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); ustatus &= ~0x00000004; } NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", @@ -919,7 +885,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), @@ -941,7 +907,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), @@ -966,7 +932,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804), @@ -988,7 +954,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) * remaining, so try to handle it anyway. Perhaps related to that * unknown DMA slot on tesla? 
*/ if (status & 0x20) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; if (display) NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index 32611bd30e6d..594720bd5191 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -36,3 +36,42 @@ void nv50_fb_takedown(struct drm_device *dev) { } + +void +nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 trap[6], idx, chinst; + int i, ch; + + idx = nv_rd32(dev, 0x100c90); + if (!(idx & 0x80000000)) + return; + idx &= 0x00ffffff; + + for (i = 0; i < 6; i++) { + nv_wr32(dev, 0x100c90, idx | i << 24); + trap[i] = nv_rd32(dev, 0x100c94); + } + nv_wr32(dev, 0x100c90, idx | 0x80000000); + + if (!display) + return; + + chinst = (trap[2] << 16) | trap[1]; + for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { + struct nouveau_channel *chan = dev_priv->fifos[ch]; + + if (!chan || !chan->ramin) + continue; + + if (chinst == chan->ramin->vinst >> 12) + break; + } + + NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " + "channel %d (0x%08x)\n", + name, (trap[5] & 0x100 ? "read" : "write"), + trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, + trap[0], ch, chinst); +} From 1da265662db0306d04efb5f687c2992e40d1b85c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Sep 2010 15:56:12 +1000 Subject: [PATCH 249/476] drm/nv50: report BAR access faults Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 316e0587fb01..a818306781cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -228,6 +228,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev) nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); } + if (dev_priv->card_type == NV_50) { + if (status & 0x00000010) { + nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); + status &= ~0x00000010; + nv_wr32(dev, 0x002100, 0x00000010); + } + } + if (status) { NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", status, chid); From 8597a1ba376e03229835a0e4215a7a4cb1dcaa32 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Sep 2010 11:39:25 +1000 Subject: [PATCH 250/476] drm/nv50: fix SOR count for early chipsets Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_reg.h | 7 ++----- drivers/gpu/drm/nouveau/nv50_display.c | 21 +++++++++++++++++---- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 21a6e453b975..4b813284fdcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -785,15 +785,12 @@ #define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) #define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8) #define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8) #define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8) #define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8) - #define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8) #define 
NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8) #define NV50_PDISPLAY_CRTC_CLK 0x00614000 #define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index b65d2ddd415d..c11a2fa43c7f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -33,6 +33,19 @@ #include "nouveau_ramht.h" #include "drm_crtc_helper.h" +static inline int +nv50_sor_nr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || + dev_priv->chipset == 0xa0) + return 2; + + return 4; +} + static void nv50_evo_channel_del(struct nouveau_channel **pchan) { @@ -233,11 +246,11 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, 0x006101d0 + (i * 0x04), val); } /* SOR */ - for (i = 0; i < 4; i++) { + for (i = 0; i < nv50_sor_nr(dev); i++) { val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); nv_wr32(dev, 0x006101e0 + (i * 0x04), val); } - /* Something not yet in use, tv-out maybe. */ + /* EXT */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); nv_wr32(dev, 0x006101f0 + (i * 0x04), val); @@ -716,7 +729,7 @@ nv50_display_unk10_handler(struct drm_device *dev) or = i; } - for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { if (dev_priv->chipset < 0x90 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) @@ -847,7 +860,7 @@ nv50_display_unk20_handler(struct drm_device *dev) or = i; } - for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { if (dev_priv->chipset < 0x90 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) From f243423759271026ddc3f8ab12a16cc7abfabc49 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 8 Sep 2010 02:21:09 +0200 Subject: [PATCH 251/476] drm/nouveau: Break some long lines in the TV-out code. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv17_tv.c | 99 ++++++++++++++----------- drivers/gpu/drm/nouveau/nv17_tv.h | 15 ++-- drivers/gpu/drm/nouveau/nv17_tv_modes.c | 46 +++++++----- 3 files changed, 95 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 703c188c32d6..a3b886166302 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) } } -static const struct { - int hdisplay; - int vdisplay; -} modes[] = { - { 640, 400 }, - { 640, 480 }, - { 720, 480 }, - { 720, 576 }, - { 800, 600 }, - { 1024, 768 }, - { 1280, 720 }, - { 1280, 1024 }, - { 1920, 1080 } -}; - -static int nv17_tv_get_modes(struct drm_encoder *encoder, - struct drm_connector *connector) +static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, + struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); - struct drm_display_mode *mode; - struct drm_display_mode *output_mode; + struct drm_display_mode *mode, *tv_mode; int n = 0; - int i; - if (tv_norm->kind != CTV_ENC_MODE) { - struct drm_display_mode *tv_mode; + for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { + mode = drm_mode_duplicate(encoder->dev, tv_mode); - for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { - mode = drm_mode_duplicate(encoder->dev, tv_mode); + mode->clock = tv_norm->tv_enc_mode.vrefresh * + mode->htotal / 1000 * + mode->vtotal / 1000; - mode->clock = tv_norm->tv_enc_mode.vrefresh * - mode->htotal / 1000 * - mode->vtotal / 1000; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + mode->clock *= 2; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - mode->clock *= 2; + if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && + mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) + mode->type |= DRM_MODE_TYPE_PREFERRED; - if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && - mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) - mode->type |= DRM_MODE_TYPE_PREFERRED; - - drm_mode_probed_add(connector, mode); - n++; - } - return n; + drm_mode_probed_add(connector, mode); + n++; } - /* tv_norm->kind == CTV_ENC_MODE */ - output_mode = &tv_norm->ctv_enc_mode.mode; + return n; +} + +static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; + struct drm_display_mode *mode; + const struct { + int hdisplay; + int vdisplay; + } modes[] = { + { 640, 400 }, + { 640, 480 }, + { 720, 480 }, + { 720, 576 }, + { 800, 600 }, + { 1024, 768 }, + { 1280, 720 }, + { 1280, 1024 }, + { 1920, 1080 } + }; + int i, n = 0; + for (i = 0; i < ARRAY_SIZE(modes); i++) { if (modes[i].hdisplay > output_mode->hdisplay || modes[i].vdisplay > output_mode->vdisplay) @@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); mode->type |= DRM_MODE_TYPE_PREFERRED; + } else { mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, - modes[i].vdisplay, 60, false, - output_mode->flags & DRM_MODE_FLAG_INTERLACE, - false); + modes[i].vdisplay, 60, false, + (output_mode->flags & + DRM_MODE_FLAG_INTERLACE), false); } /* CVT modes are sometimes unsuitable... 
*/ @@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, - mode->hdisplay) * 9 / 10) & ~7; mode->hsync_end = mode->hsync_start + 8; } + if (output_mode->vdisplay >= 1024) { mode->vtotal = output_mode->vtotal; mode->vsync_start = output_mode->vsync_start; @@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, drm_mode_probed_add(connector, mode); n++; } + return n; } +static int nv17_tv_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == CTV_ENC_MODE) + return nv17_tv_get_hd_modes(encoder, connector); + else + return nv17_tv_get_ld_modes(encoder, connector); +} + static int nv17_tv_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h index c00977cedabd..6bf03840f9eb 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.h +++ b/drivers/gpu/drm/nouveau/nv17_tv.h @@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); /* TV hardware access functions */ -static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) +static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, + uint32_t val) { nv_wr32(dev, reg, val); } @@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) return nv_rd32(dev, reg); } -static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val) +static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, + uint8_t val) { nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); nv_write_ptv(dev, NV_PTV_TV_DATA, val); @@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg) return nv_read_ptv(dev, NV_PTV_TV_DATA); } -#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) -#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) -#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) +#define nv_load_ptv(dev, state, reg) \ + nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) +#define nv_save_ptv(dev, state, reg) \ + state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) +#define nv_load_tv_enc(dev, state, reg) \ + nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) #endif diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c index d64683d97e0d..9d3893c50a41 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c +++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c @@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder) struct filter_params *p = &fparams[k][j]; for (i = 0; i < 7; i++) { - int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) - + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k] - + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker - + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k]; + int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + + p->ki3*i*i*i) + + (p->kr + p->kir*i + p->ki2r*i*i + + p->ki3r*i*i*i) * rs[k] + + (p->kf + p->kif*i + p->ki2f*i*i + + p->ki3f*i*i*i) * flicker + + (p->krf + p->kirf*i + p->ki2rf*i*i + + p->ki3rf*i*i*i) * flicker * rs[k]; - (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); + (*filters[k])[j][i] = (c + id5/2) >> 39 + & (0x1 << 31 | 0x7f << 9); } } } @@ -349,7 +354,8 @@ static void tv_setup_filter(struct 
drm_encoder *encoder) /* Hardware state saving/restoring */ -static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +static void tv_save_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; @@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[ } } -static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +static void tv_load_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; @@ -504,10 +511,10 @@ void nv17_tv_update_properties(struct drm_encoder *encoder) break; } - regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, - tv_enc->saturation); - regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, - tv_enc->saturation); + regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], + 255, tv_enc->saturation); + regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], + 255, tv_enc->saturation); regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; nv_load_ptv(dev, regs, 204); @@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder) int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct drm_display_mode *crtc_mode = &encoder->crtc->mode; - struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; + struct drm_display_mode *output_mode = + &get_tv_norm(encoder)->ctv_enc_mode.mode; int overscan, hmargin, vmargin, hratio, vratio; /* The rescaler doesn't do the right thing for interlaced modes. */ @@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder) hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; - hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, - overscan); - vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, - overscan); + hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), + hmargin, overscan); + vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), + vmargin, overscan); - hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); - vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; + hratio = crtc_mode->hdisplay * 0x800 / + (output_mode->hdisplay - 2*hmargin); + vratio = crtc_mode->vdisplay * 0x800 / + (output_mode->vdisplay - 2*vmargin) & ~3; regs->fp_horiz_regs[FP_VALID_START] = hmargin; regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; From 3bc14b4dca2f8bc088162476279480e78491fd83 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 5 Sep 2010 06:03:07 +0200 Subject: [PATCH 252/476] drm/nouveau: Don't remove ramht entries from the neighboring channels. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_ramht.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index f240ba241943..7f16697cc96c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -63,6 +63,23 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, return (ctx != 0); } +static int +nouveau_ramht_entry_same_channel(struct nouveau_channel *chan, + struct nouveau_gpuobj *ramht, u32 offset) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + u32 ctx = nv_ro32(ramht, offset + 4); + + if (dev_priv->card_type >= NV_50) + return true; + else if (dev_priv->card_type >= NV_40) + return chan->id == + ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); + else + return chan->id == + ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); +} + int nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, struct nouveau_gpuobj *gpuobj) @@ -159,6 +176,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) co = ho = nouveau_ramht_hash_handle(chan, handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && + nouveau_ramht_entry_same_channel(chan, ramht, co) && (handle == nv_ro32(ramht, co))) { NV_DEBUG(dev, "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", From 71d0618661f9dd531caa94ce2ce5dc919321624b Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 8 Sep 2010 02:23:20 +0200 Subject: [PATCH 253/476] drm/nouveau: Don't enable AGP FW on nv18. FW seems to be broken on nv18, it causes random lockups and breaks suspend/resume even with the blob. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fb15a1b0dda9..a885cd0b27e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -417,6 +417,23 @@ nouveau_mem_detect(struct drm_device *dev) return -ENOMEM; } +#if __OS_HAS_AGP +static unsigned long +get_agp_mode(struct drm_device *dev, unsigned long mode) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* + * FW seems to be broken on nv18, it makes the card lock up + * randomly. + */ + if (dev_priv->chipset == 0x18) + mode &= ~PCI_AGP_COMMAND_FW; + + return mode; +} +#endif + int nouveau_mem_reset_agp(struct drm_device *dev) { @@ -436,7 +453,7 @@ nouveau_mem_reset_agp(struct drm_device *dev) if (ret) return ret; - mode.mode = info.mode & ~PCI_AGP_COMMAND_FW; + mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW; ret = drm_agp_enable(dev, mode); if (ret) return ret; @@ -491,7 +508,7 @@ nouveau_mem_init_agp(struct drm_device *dev) } /* see agp.h for the AGPSTAT_* modes available */ - mode.mode = info.mode; + mode.mode = get_agp_mode(dev, info.mode); ret = drm_agp_enable(dev, mode); if (ret) { NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); From de5899bdaccaabfaab894559a7eec3302ae88b52 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 8 Sep 2010 02:28:23 +0200 Subject: [PATCH 254/476] drm/nouveau: Add module parameter to override the default AGP rate. 
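The practical effect of this change is a load-time knob rather than the old on/off switch. By way of illustration (the example invocations themselves are not part of the patch; only the agpmode name, the 0 = "disable AGP" meaning and the positive-value rate override come from the code below):

    modprobe nouveau agpmode=0     # disable AGP entirely
    modprobe nouveau agpmode=4     # force AGP 4x instead of the firmware-reported mode
    nouveau.agpmode=1              # the same knob as a kernel command-line option

The default of -1 keeps the previous behaviour of honouring whatever mode the bridge reports, and the 0400 permission below means the value cannot be changed through sysfs after the module is loaded.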
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 6 +++--- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 12 +++++++++++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 209912a1b7a5..14a4960a989a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -35,9 +35,9 @@ #include "drm_pciids.h" -MODULE_PARM_DESC(noagp, "Disable AGP"); -int nouveau_noagp; -module_param_named(noagp, nouveau_noagp, int, 0400); +MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)"); +int nouveau_agpmode = -1; +module_param_named(agpmode, nouveau_agpmode, int, 0400); MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); static int nouveau_modeset = -1; /* kms */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c8b990b09f5d..cc1892cce6cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -659,7 +659,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) } while (0) /* nouveau_drv.c */ -extern int nouveau_noagp; +extern int nouveau_agpmode; extern int nouveau_duallink; extern int nouveau_uscript_lvds; extern int nouveau_uscript_tmds; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index a885cd0b27e8..02aa9d2351da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -430,6 +430,16 @@ get_agp_mode(struct drm_device *dev, unsigned long mode) if (dev_priv->chipset == 0x18) mode &= ~PCI_AGP_COMMAND_FW; + /* + * AGP mode set in the command line. + */ + if (nouveau_agpmode > 0) { + bool agpv3 = mode & 0x8; + int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; + + mode = (mode & ~0x7) | (rate & 0x7); + } + return mode; } #endif @@ -613,7 +623,7 @@ nouveau_mem_gart_init(struct drm_device *dev) dev_priv->gart_info.type = NOUVEAU_GART_NONE; #if !defined(__powerpc__) && !defined(__ia64__) - if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) { + if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) { ret = nouveau_mem_init_agp(dev); if (ret) NV_ERROR(dev, "Error initialising AGP: %d\n", ret); From 35fd5b42f035c795bd9c6649591f59b500cb62b3 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 6 Sep 2010 20:25:28 +0200 Subject: [PATCH 255/476] drm/nouveau: PRAMIN is available from the start on pre-nv50. This makes sure that RAMHT is cleared correctly on start up. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_instmem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 88316100389b..0b5ae297abde 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -25,6 +25,9 @@ int nv04_instmem_init(struct drm_device *dev) u32 offset, length; int ret; + /* RAMIN always available */ + dev_priv->ramin_available = true; + /* Setup shared RAMHT */ ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, NVOBJ_FLAG_ZERO_ALLOC, &ramht); @@ -81,7 +84,6 @@ int nv04_instmem_init(struct drm_device *dev) return ret; } - dev_priv->ramin_available = true; return 0; } From 4b5c152a79d512803ea525b0878ccef627cd1629 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 7 Sep 2010 17:34:44 +0200 Subject: [PATCH 256/476] drm/nouveau: Remove implicit argument from nv_wait(). Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dp.c | 3 ++- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_state.c | 2 +- drivers/gpu/drm/nouveau/nv50_cursor.c | 2 +- drivers/gpu/drm/nouveau/nv50_dac.c | 4 ++-- drivers/gpu/drm/nouveau/nv50_display.c | 19 ++++++++++--------- drivers/gpu/drm/nouveau/nv50_graph.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 6 +++--- drivers/gpu/drm/nouveau/nv50_sor.c | 4 ++-- drivers/gpu/drm/nouveau/nvc0_instmem.c | 2 +- 10 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 8a1b188b4cd1..89ca1f6851a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -524,7 +524,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); - if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { + if (!nv_wait(dev, NV50_AUXCH_CTRL(index), + 0x00010000, 0x00000000)) { NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", nv_rd32(dev, NV50_AUXCH_CTRL(index))); ret = -EBUSY; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index cc1892cce6cd..6313ba4fcdcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1252,7 +1252,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) iowrite8(val, dev_priv->mmio + reg); } -#define nv_wait(reg, mask, val) \ +#define nv_wait(dev, reg, mask, val) \ nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) /* PRAMIN access */ diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 19eb06dca899..9a4bf4442924 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1044,7 +1044,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, /* Waits for PGRAPH to go completely idle */ bool nouveau_wait_for_idle(struct drm_device *dev) { - if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { + if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", nv_rd32(dev, NV04_PGRAPH_STATUS)); return false; diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index 
03ad7ab14f09..1b9ce3021aa3 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc) NV_DEBUG_KMS(dev, "\n"); nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index 1bc085962945..875414b09ade 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -79,7 +79,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); - if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, @@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode) NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); /* wait for it to be done */ - if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index c11a2fa43c7f..11d366ad4036 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -279,7 +279,7 @@ nv50_display_init(struct drm_device *dev) if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) { nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100); nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1); - if (!nv_wait(0x006194e8, 2, 0)) { + if (!nv_wait(dev, 0x006194e8, 2, 0)) { NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n"); NV_ERROR(dev, "0x6194e8 = 0x%08x\n", nv_rd32(dev, 0x6194e8)); @@ -310,7 +310,8 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); - if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) { + if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), + 0x40000000, 0x40000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); @@ -319,7 +320,7 @@ nv50_display_init(struct drm_device *dev) for (i = 0; i < 2; i++) { nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", @@ -329,7 +330,7 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { NV_ERROR(dev, "timeout: " @@ -349,7 +350,7 @@ nv50_display_init(struct drm_device 
*dev) NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); - if (!nv_wait(0x610200, 0x80000000, 0x00000000)) { + if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); return -EBUSY; @@ -389,7 +390,7 @@ nv50_display_init(struct drm_device *dev) BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1); OUT_RING(evo, 0); FIRE_RING(evo); - if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2)) + if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) NV_ERROR(dev, "evo pushbuf stalled\n"); /* enable clock change interrupts. */ @@ -443,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev) continue; nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask); - if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) { + if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) { NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == " "0x%08x\n", mask, mask); NV_ERROR(dev, "0x610024 = 0x%08x\n", @@ -453,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); - if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { + if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); } for (i = 0; i < 3; i++) { - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 7db0d627c1b9..cbf5ae2f67d4 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev) /* Be sure we're not in the middle of a context switch or bad things * will happen, such as unloading the wrong pgraph context. 
*/ - if (!nv_wait(0x400300, 0x00000001, 0x00000000)) + if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) NV_ERROR(dev, "Ctxprog is still running\n"); inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 2e0aaf971b2f..bb73c67e23b5 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -439,7 +439,7 @@ void nv50_instmem_flush(struct drm_device *dev) { nv_wr32(dev, 0x00330c, 0x00000001); - if (!nv_wait(0x00330c, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } @@ -447,7 +447,7 @@ void nv84_instmem_flush(struct drm_device *dev) { nv_wr32(dev, 0x070000, 0x00000001); - if (!nv_wait(0x070000, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } @@ -455,7 +455,7 @@ void nv50_vm_flush(struct drm_device *dev, int engine) { nv_wr32(dev, 0x100c80, (engine << 16) | 1); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) + if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); } diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index bcd4cf84a7e6..b4a5ecb199f9 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) } /* wait for it to be done */ - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, @@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or), NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 595540975637..6a41d644e044 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -133,7 +133,7 @@ void nvc0_instmem_flush(struct drm_device *dev) { nv_wr32(dev, 0x070000, 1); - if (!nv_wait(0x070000, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } From 9f56b1265d66f6698e3155529e6c5d0c34b37afa Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 7 Sep 2010 18:24:52 +0200 Subject: [PATCH 257/476] drm/nouveau: Simplify tile region handling. Instead of emptying the caches to avoid a race with the PFIFO puller, go straight ahead and try to recover from it when it happens. Also, kill pfifo->cache_flush and tile->lock, we don't need them anymore. 
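Condensed, the recovery logic this adds on the disable side of the puller is roughly the following (a sketch of the nv04_fifo_cache_pull() hunk further down, not a standalone function; the two HASH_* defines are also introduced by this patch):

        /* stop the puller, then clean up after any in-flight handle translation */
        nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, 0);
        if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
                     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
                NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
        if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
            NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
                /* clear any CACHE_ERROR raised by the failed lookup */
                nv_wr32(dev, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
        nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);        /* invalidate the stale instance */

With that in place the tiling update path just disables reassign()/cache_pull() around its register writes and serializes on context_switch_lock instead of a dedicated tile.lock, which is why cache_flush() can be deleted outright.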
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 +---- drivers/gpu/drm/nouveau/nouveau_mem.c | 30 +++++++---------- drivers/gpu/drm/nouveau/nouveau_reg.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_state.c | 5 --- drivers/gpu/drm/nouveau/nv04_fifo.c | 45 +++++++++++-------------- drivers/gpu/drm/nouveau/nvc0_fifo.c | 6 ---- 6 files changed, 36 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6313ba4fcdcd..912b04be0def 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -296,7 +296,6 @@ struct nouveau_fifo_engine { void (*disable)(struct drm_device *); void (*enable)(struct drm_device *); bool (*reassign)(struct drm_device *, bool enable); - bool (*cache_flush)(struct drm_device *dev); bool (*cache_pull)(struct drm_device *dev, bool enable); int (*channel_id)(struct drm_device *); @@ -569,10 +568,7 @@ struct drm_nouveau_private { } gart_info; /* nv10-nv40 tiling regions */ - struct { - struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; - spinlock_t lock; - } tile; + struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; /* VRAM/fb configuration */ uint64_t vram_size; @@ -917,7 +913,6 @@ extern int nv04_fifo_init(struct drm_device *); extern void nv04_fifo_disable(struct drm_device *); extern void nv04_fifo_enable(struct drm_device *); extern bool nv04_fifo_reassign(struct drm_device *, bool); -extern bool nv04_fifo_cache_flush(struct drm_device *); extern bool nv04_fifo_cache_pull(struct drm_device *, bool); extern int nv04_fifo_channel_id(struct drm_device *); extern int nv04_fifo_create_context(struct nouveau_channel *); @@ -955,7 +950,6 @@ extern void nvc0_fifo_takedown(struct drm_device *); extern void nvc0_fifo_disable(struct drm_device *); extern void nvc0_fifo_enable(struct drm_device *); extern bool nvc0_fifo_reassign(struct drm_device *, bool); -extern bool nvc0_fifo_cache_flush(struct drm_device *); extern bool nvc0_fifo_cache_pull(struct drm_device *, bool); extern int nvc0_fifo_channel_id(struct drm_device *); extern int nvc0_fifo_create_context(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 02aa9d2351da..2db01f80f38e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -47,18 +47,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + struct nouveau_tile_reg *tile = &dev_priv->tile[i]; tile->addr = addr; tile->size = size; tile->used = !!pitch; nouveau_fence_unref((void **)&tile->fence); - if (!pfifo->cache_flush(dev)) - return; - pfifo->reassign(dev, false); - pfifo->cache_flush(dev); pfifo->cache_pull(dev, false); nouveau_wait_for_idle(dev); @@ -76,34 +72,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; - struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL; - int i; + struct nouveau_tile_reg *found = NULL; + unsigned long i, flags; - spin_lock(&dev_priv->tile.lock); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); for (i = 0; i < pfb->num_tiles; i++) { - if 
(tile[i].used) + struct nouveau_tile_reg *tile = &dev_priv->tile[i]; + + if (tile->used) /* Tile region in use. */ continue; - if (tile[i].fence && - !nouveau_fence_signalled(tile[i].fence, NULL)) + if (tile->fence && + !nouveau_fence_signalled(tile->fence, NULL)) /* Pending tile region. */ continue; - if (max(tile[i].addr, addr) < - min(tile[i].addr + tile[i].size, addr + size)) + if (max(tile->addr, addr) < + min(tile->addr + tile->size, addr + size)) /* Kill an intersecting tile region. */ nv10_mem_set_region_tiling(dev, i, 0, 0, 0); if (pitch && !found) { /* Free tile region. */ nv10_mem_set_region_tiling(dev, i, addr, size, pitch); - found = &tile[i]; + found = tile; } } - spin_unlock(&dev_priv->tile.lock); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return found; } @@ -568,8 +566,6 @@ nouveau_mem_vram_init(struct drm_device *dev) return ret; } - spin_lock_init(&dev_priv->tile.lock); - dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 4b813284fdcf..1b42541ca9e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -551,6 +551,8 @@ #define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C #define NV03_PFIFO_CACHE1_PULL0 0x00003240 #define NV04_PFIFO_CACHE1_PULL0 0x00003250 +# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010 +# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000 #define NV03_PFIFO_CACHE1_PULL1 0x00003250 #define NV04_PFIFO_CACHE1_PULL1 0x00003254 #define NV04_PFIFO_CACHE1_HASH 0x00003258 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 9a4bf4442924..be859604cf64 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -79,7 +79,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv04_fifo_channel_id; engine->fifo.create_context = nv04_fifo_create_context; @@ -131,7 +130,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; @@ -183,7 +181,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; @@ -235,7 +232,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context 
= nv10_fifo_create_context; @@ -288,7 +284,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv40_fifo_create_context; diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 64dc0e215eeb..708293b7ddcd 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -70,38 +70,33 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable) return (reassign == 1); } -bool -nv04_fifo_cache_flush(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; - uint64_t start = ptimer->read(dev); - - do { - if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) == - nv_rd32(dev, NV03_PFIFO_CACHE1_PUT)) - return true; - - } while (ptimer->read(dev) - start < 100000000); - - NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n"); - - return false; -} - bool nv04_fifo_cache_pull(struct drm_device *dev, bool enable) { - uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0); + int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable); + + if (!enable) { + /* In some cases the PFIFO puller may be left in an + * inconsistent state if you try to stop it when it's + * busy translating handles. Sometimes you get a + * PFIFO_CACHE_ERROR, sometimes it just fails silently + * sending incorrect instance offsets to PGRAPH after + * it's started up again. To avoid the latter we + * invalidate the most recently calculated instance. + */ + if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, + NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) + NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); + + if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & + NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_CACHE_ERROR); - if (enable) { - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1); - } else { - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1); nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); } - return !!(pull & 1); + return pull & 1; } int diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index d64375871979..890c2b95fbc1 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -42,12 +42,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable) return false; } -bool -nvc0_fifo_cache_flush(struct drm_device *dev) -{ - return true; -} - bool nvc0_fifo_cache_pull(struct drm_device *dev, bool enable) { From c16c570762bb4419f2bb764f2a7428c249d905d0 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 9 Sep 2010 14:33:17 +0200 Subject: [PATCH 258/476] drm/nouveau: Try to fetch an EDID from OF if DDC fails. More Apple brain damage, it fixes the modesetting failure on an eMac G4 (fdo bug 29810). 
Reported-by: Zoltan Varnagy Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 36 +++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 87186a4bbf03..98c214c34922 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -130,6 +130,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, return NULL; } +static struct nouveau_encoder * +nouveau_connector_of_detect(struct drm_connector *connector) +{ +#ifdef __powerpc__ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); + + if (!dn || + !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) || + (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG)))) + return NULL; + + for_each_child_of_node(dn, cn) { + const char *name = of_get_property(cn, "name", NULL); + const void *edid = of_get_property(cn, "EDID", NULL); + int idx = name ? name[strlen(name) - 1] - 'A' : 0; + + if (nv_encoder->dcb->i2c_index == idx && edid) { + nv_connector->edid = + kmemdup(edid, EDID_LENGTH, GFP_KERNEL); + of_node_put(cn); + return nv_encoder; + } + } +#endif + return NULL; +} + static void nouveau_connector_set_encoder(struct drm_connector *connector, struct nouveau_encoder *nv_encoder) @@ -225,6 +255,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) return connector_status_connected; } + nv_encoder = nouveau_connector_of_detect(connector); + if (nv_encoder) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + detect_analog: nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); if (!nv_encoder && !nouveau_tv_disable) From e1429b4c3c79512f0e1a44d9d895c1db52b8c42f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 10 Sep 2010 11:12:25 +1000 Subject: [PATCH 259/476] drm/nouveau: better handling of unmappable vram Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8e4a9bce4f3b..2b97d97f1493 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -162,8 +162,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); align >>= PAGE_SHIFT; - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; @@ -907,7 +905,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static int nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { - return 0; + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + /* as long as the bo isn't in vram, and isn't tiled, we've got + * nothing to do here. 
+ */ + if (bo->mem.mem_type != TTM_PL_VRAM) { + if (dev_priv->chipset < NV_50 || !nvbo->tile_flags) + return 0; + } + + /* make sure bo is in mappable vram */ + if (bo->mem.mm_node->start + bo->mem.num_pages < dev_priv->fb_mappable_pages) + return 0; + + + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = dev_priv->fb_mappable_pages; + nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); + return ttm_bo_validate(bo, &nvbo->placement, false, true, false); } struct ttm_bo_driver nouveau_bo_driver = { From e071f8cd714261cb4f5ce7104eb54b16b2bbb0cf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Sep 2010 15:40:30 +1000 Subject: [PATCH 260/476] drm/nouveau: handle fifo pusher errors better The most important part of this change is that we now instruct PFIFO to drop all pending fetches, rather than attempting to skip a single dword and hope that things would magically sort themselves out - they usually don't, and we end up with PFIFO being completely hung. This commit also adds somewhat more useful logging when these exceptions occur. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 45 ++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index a818306781cc..6fd51a51c608 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -202,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev) } if (status & NV_PFIFO_INTR_DMA_PUSHER) { - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid); + u32 get = nv_rd32(dev, 0x003244); + u32 put = nv_rd32(dev, 0x003240); + u32 push = nv_rd32(dev, 0x003220); + u32 state = nv_rd32(dev, 0x003228); + if (dev_priv->card_type == NV_50) { + u32 ho_get = nv_rd32(dev, 0x003328); + u32 ho_put = nv_rd32(dev, 0x003320); + u32 ib_get = nv_rd32(dev, 0x003334); + u32 ib_put = nv_rd32(dev, 0x003330); + + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " + "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " + "State 0x%08x Push 0x%08x\n", + chid, ho_get, get, ho_put, put, ib_get, ib_put, + state, push); + + /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ + nv_wr32(dev, 0x003364, 0x00000000); + if (get != put || ho_get != ho_put) { + nv_wr32(dev, 0x003244, put); + nv_wr32(dev, 0x003328, ho_put); + } else + if (ib_get != ib_put) { + nv_wr32(dev, 0x003334, ib_put); + } + } else { + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " + "Put 0x%08x State 0x%08x Push 0x%08x\n", + chid, get, put, state, push); + + if (get != put) + nv_wr32(dev, 0x003244, put); + } + + nv_wr32(dev, 0x003228, 0x00000000); + nv_wr32(dev, 0x003220, 0x00000001); + nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); status &= ~NV_PFIFO_INTR_DMA_PUSHER; - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_DMA_PUSHER); - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); - if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get) - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, - get + 4); } if (status & NV_PFIFO_INTR_SEMAPHORE) { From 24b102d3488c9d201915d070a519e07098e0cd30 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 10 Sep 2010 15:33:11 +1000 Subject: [PATCH 261/476] drm/nouveau: we can't free ACPI EDID, so make a copy that we can The rest of the connector code assumes we can kfree() the EDID pointer. This causes things to blow up with the ACPI EDID pointer we get passed. 
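The ownership rule being restored here: whatever is stored in nv_connector->edid must be a buffer the connector teardown path may kfree() unconditionally. An EDID handed back by ACPI belongs to firmware, so a private copy has to be kept instead, which is the usual kmemdup() idiom (sketch only; the one-line hunk below does not add the allocation-failure check shown here):

/* edid points into ACPI-owned memory, so never kfree() it directly.
 * Store a private copy so the generic connector teardown path can
 * free nv_connector->edid without caring where it came from. */
nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
if (!nv_connector->edid)
	return -ENOMEM;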
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_acpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index c17a055ee3e5..119152606e4c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) if (ret < 0) return ret; - nv_connector->edid = edid; + nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); return 0; } From 17b20348ea94a92a54898c518b514c564d12e4c3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 13 Sep 2010 09:57:44 +1000 Subject: [PATCH 262/476] drm/nv50: mark PCIEGART pages non-present rather than using dummy page Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 5a66a7ae6e29..c0b79659419b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -151,7 +151,7 @@ nouveau_sgdma_unbind(struct ttm_backend *be) nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); pte += 1; } else { - nv_wo32(gpuobj, (pte * 4), dma_offset | 0x21); + nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); pte += 2; } @@ -279,9 +279,8 @@ nouveau_sgdma_init(struct drm_device *dev) } } else { for (i = 0; i < obj_size; i += 8) { - nv_wo32(gpuobj, i + 0, - dev_priv->gart_info.sg_dummy_bus | 0x21); - nv_wo32(gpuobj, i + 4, 0); + nv_wo32(gpuobj, i + 0, 0x00000000); + nv_wo32(gpuobj, i + 4, 0x00000000); } } dev_priv->engine.instmem.flush(dev); From b6fd780791e9189b781e27a443d47bd21ce5145f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 13 Sep 2010 09:58:37 +1000 Subject: [PATCH 263/476] drm/nouveau: zero dummy page Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index c0b79659419b..7f028fee7a58 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -242,7 +242,7 @@ nouveau_sgdma_init(struct drm_device *dev) } dev_priv->gart_info.sg_dummy_page = - alloc_page(GFP_KERNEL|__GFP_DMA32); + alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO); if (!dev_priv->gart_info.sg_dummy_page) { nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; From f9aafdd30ef8356f0a3690bf9bdd9c6e51b7705c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 13 Sep 2010 11:12:50 +1000 Subject: [PATCH 264/476] drm/nv50: fix 100c90 write on nva3 Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_fb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index 594720bd5191..cd1988b15d2c 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev) case 0x50: nv_wr32(dev, 0x100c90, 0x0707ff); break; + case 0xa3: case 0xa5: case 0xa8: nv_wr32(dev, 0x100c90, 0x0d0fff); From 855a95e4fc2ac6b758145ca7d6a0c95b66a57ef8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Sep 2010 15:25:25 +1000 Subject: [PATCH 265/476] drm/nouveau: make the behaviour of get_pll_limits() consistent This replaces all the pll_types definitions for ones that match the types used in the tables in recent 
VBIOS versions. get_pll_limits() will now accept either type or register value as input across all limits table versions, and will store the actual register ID that a PLL type refers to in the returned structure. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 136 +++++++++++++++++++------ drivers/gpu/drm/nouveau/nouveau_bios.h | 24 +++-- drivers/gpu/drm/nouveau/nouveau_calc.c | 4 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_hw.c | 27 ++--- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 +- 6 files changed, 135 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 974b0f8ae048..2319390b200a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -4675,6 +4675,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i return 0; } +struct pll_mapping { + u8 type; + u32 reg; +}; + +static struct pll_mapping nv04_pll_mapping[] = { + { PLL_CORE , NV_PRAMDAC_NVPLL_COEFF }, + { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF }, + { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF }, + { PLL_VPLL1 , NV_RAMDAC_VPLL2 }, + {} +}; + +static struct pll_mapping nv40_pll_mapping[] = { + { PLL_CORE , 0x004000 }, + { PLL_MEMORY, 0x004020 }, + { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF }, + { PLL_VPLL1 , NV_RAMDAC_VPLL2 }, + {} +}; + +static struct pll_mapping nv50_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_UNK03 , 0x004000 }, + { PLL_MEMORY, 0x004008 }, + { PLL_UNK40 , 0x00e810 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_UNK42 , 0x00e824 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +static struct pll_mapping nv84_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_MEMORY, 0x004008 }, + { PLL_UNK05 , 0x004030 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +u32 +get_pll_register(struct drm_device *dev, enum pll_types type) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct pll_mapping *map; + int i; + + if (dev_priv->card_type < NV_40) + map = nv04_pll_mapping; + else + if (dev_priv->card_type < NV_50) + map = nv40_pll_mapping; + else { + u8 *plim = &bios->data[bios->pll_limit_tbl_ptr]; + + if (plim[0] >= 0x40) { + u8 *entry = plim + plim[1]; + for (i = 0; i < plim[3]; i++, entry += plim[2]) { + if (entry[0] == type) + return ROM32(entry[3]); + } + + return 0; + } + + if (dev_priv->chipset == 0x50) + map = nv50_pll_mapping; + else + map = nv84_pll_mapping; + } + + while (map->reg) { + if (map->type == type) + return map->reg; + map++; + } + + return 0; +} + int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim) { /* @@ -4750,6 +4836,14 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims /* initialize all members to zero */ memset(pll_lim, 0, sizeof(struct pll_lims)); + /* if we were passed a type rather than a register, figure + * out the register and store it + */ + if (limit_match > PLL_MAX) + pll_lim->reg = limit_match; + else + pll_lim->reg = get_pll_register(dev, limit_match); + if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) { uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex]; @@ -4785,7 +4879,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims pll_lim->max_usable_log2p = 0x6; } else if (pll_lim_ver == 0x20 
|| pll_lim_ver == 0x21) { uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen; - uint32_t reg = 0; /* default match */ uint8_t *pll_rec; int i; @@ -4797,29 +4890,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims NV_WARN(dev, "Default PLL limit entry has non-zero " "register field\n"); - if (limit_match > MAX_PLL_TYPES) - /* we've been passed a reg as the match */ - reg = limit_match; - else /* limit match is a pll type */ - for (i = 1; i < entries && !reg; i++) { - uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]); - - if (limit_match == NVPLL && - (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000)) - reg = cmpreg; - if (limit_match == MPLL && - (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020)) - reg = cmpreg; - if (limit_match == VPLL1 && - (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010)) - reg = cmpreg; - if (limit_match == VPLL2 && - (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018)) - reg = cmpreg; - } - for (i = 1; i < entries; i++) - if (ROM32(bios->data[plloffs + recordlen * i]) == reg) { + if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) { pllindex = i; break; } @@ -4827,7 +4899,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims pll_rec = &bios->data[plloffs + recordlen * pllindex]; BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n", - pllindex ? reg : 0); + pllindex ? pll_lim->reg : 0); /* * Frequencies are stored in tables in MHz, kHz are more @@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (cv == 0x51 && !pll_lim->refclk) { uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK); - if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) || - ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) { + if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) || + (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) { if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3) pll_lim->refclk = 200000; else @@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims int i; BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", - limit_match); + pll_lim->reg); for (i = 0; i < entries; i++, entry += recordlen) { - if (ROM32(entry[3]) == limit_match) { + if (ROM32(entry[3]) == pll_lim->reg) { record = &bios->data[ROM16(entry[1])]; break; } @@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (!record) { NV_ERROR(dev, "Register 0x%08x not found in PLL " - "limits table", limit_match); + "limits table", pll_lim->reg); return -ENOENT; } @@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims int i; BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", - limit_match); + pll_lim->reg); for (i = 0; i < entries; i++, entry += recordlen) { - if (ROM32(entry[3]) == limit_match) { + if (ROM32(entry[3]) == pll_lim->reg) { record = &bios->data[ROM16(entry[1])]; break; } @@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (!record) { NV_ERROR(dev, "Register 0x%08x not found in PLL " - "limits table", limit_match); + "limits table", pll_lim->reg); return -ENOENT; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index c1de2f3fcb0e..02c5dd09ba7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ 
b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -170,16 +170,28 @@ enum LVDS_script { LVDS_PANEL_OFF }; -/* changing these requires matching changes to reg tables in nv_get_clock */ -#define MAX_PLL_TYPES 4 +/* these match types in pll limits table version 0x40, + * nouveau uses them on all chipsets internally where a + * specific pll needs to be referenced, but the exact + * register isn't known. + */ enum pll_types { - NVPLL, - MPLL, - VPLL1, - VPLL2 + PLL_CORE = 0x01, + PLL_SHADER = 0x02, + PLL_UNK03 = 0x03, + PLL_MEMORY = 0x04, + PLL_UNK05 = 0x05, + PLL_UNK40 = 0x40, + PLL_UNK41 = 0x41, + PLL_UNK42 = 0x42, + PLL_VPLL0 = 0x80, + PLL_VPLL1 = 0x81, + PLL_MAX = 0xff }; struct pll_lims { + u32 reg; + struct { int minfreq; int maxfreq; diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c index 23d9896962f4..dad96cce5e39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_calc.c +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv_fifo_info fifo_data; struct nv_sim_state sim_data; - int MClk = nouveau_hw_get_clock(dev, MPLL); - int NVClk = nouveau_hw_get_clock(dev, NVPLL); + int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); + int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1); sim_data.pclk_khz = VClk; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 912b04be0def..d2fecc05eae4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -853,6 +853,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, enum dcb_gpio_tag); extern struct dcb_connector_table_entry * nouveau_bios_connector_entry(struct drm_device *, int index); +extern u32 get_pll_register(struct drm_device *, enum pll_types); extern int get_pll_limits(struct drm_device *, uint32_t limit_match, struct pll_lims *); extern int nouveau_bios_run_display_table(struct drm_device *, diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index f8ec49b5308b..e228aafc03e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -427,22 +427,11 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, struct nouveau_pll_vals *pllvals) { struct drm_nouveau_private *dev_priv = dev->dev_private; - const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF, - NV_PRAMDAC_MPLL_COEFF, - NV_PRAMDAC_VPLL_COEFF, - NV_RAMDAC_VPLL2 }; - const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000, - 0x4020, - NV_PRAMDAC_VPLL_COEFF, - NV_RAMDAC_VPLL2 }; - uint32_t reg1, pll1, pll2 = 0; + uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0; struct pll_lims pll_lim; int ret; - if (dev_priv->card_type < NV_40) - reg1 = nv04_regs[plltype]; - else - reg1 = nv40_regs[plltype]; + BUG_ON(reg1 == 0); pll1 = nvReadMC(dev, reg1); @@ -492,7 +481,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) { struct nouveau_pll_vals pllvals; - if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { + if (plltype == PLL_MEMORY && + (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); @@ -501,7 +491,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) return 400000 / mpllP; } else - if (plltype == MPLL && (dev->pci_device & 0xff0) == 
CHIPSET_NFORCE2) { + if (plltype == PLL_MEMORY && + (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); @@ -526,9 +517,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) struct nouveau_pll_vals pv; uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; - if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim)) + if (get_pll_limits(dev, pllreg, &pll_lim)) return; - nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv); + nouveau_hw_get_pllvals(dev, pllreg, &pv); if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && @@ -661,7 +652,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, if (dev_priv->card_type >= NV_10) regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); - nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, ®p->pllvals); + nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); if (nv_two_heads(dev)) state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 1f0f978d8e9d..ef480281afec 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -109,7 +109,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod struct nouveau_pll_vals *pv = ®p->pllvals; struct pll_lims pll_lim; - if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim)) + if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim)) return; /* NM2 == 0 is used to determine single stage mode on two stage plls */ From 4709bff02adcb0d05d2d1a397e60581baa562de9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 13 Sep 2010 15:18:40 +1000 Subject: [PATCH 266/476] drm/nouveau: make bios code easier to use externally Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 54 +++++++++++++++----------- drivers/gpu/drm/nouveau/nouveau_bios.h | 19 +++++++++ 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 2319390b200a..ef44070321e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -43,9 +43,6 @@ #define BIOSLOG(sip, fmt, arg...) 
NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) -#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x)) -#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x)) - struct init_exec { bool execute; bool repeat; @@ -272,12 +269,6 @@ struct init_tbl_entry { int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; -struct bit_entry { - uint8_t id[2]; - uint16_t length; - uint16_t offset; -}; - static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); #define MACRO_INDEX_SIZE 2 @@ -5365,7 +5356,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, if (bitentry->length < 0x5) return 0; - if (bitentry->id[1] < 2) { + if (bitentry->version < 2) { bios->ram_restrict_group_count = bios->data[bitentry->offset + 2]; bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]); } else { @@ -5475,27 +5466,40 @@ struct bit_table { #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) +int +bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + u8 entries, *entry; + + entries = bios->data[bios->offset + 10]; + entry = &bios->data[bios->offset + 12]; + while (entries--) { + if (entry[0] == id) { + bit->id = entry[0]; + bit->version = entry[1]; + bit->length = ROM16(entry[2]); + bit->offset = ROM16(entry[4]); + bit->data = ROMPTR(bios, entry[4]); + return 0; + } + + entry += bios->data[bios->offset + 9]; + } + + return -ENOENT; +} + static int parse_bit_table(struct nvbios *bios, const uint16_t bitoffset, struct bit_table *table) { struct drm_device *dev = bios->dev; - uint8_t maxentries = bios->data[bitoffset + 4]; - int i, offset; struct bit_entry bitentry; - for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) { - bitentry.id[0] = bios->data[offset]; - - if (bitentry.id[0] != table->id) - continue; - - bitentry.id[1] = bios->data[offset + 1]; - bitentry.length = ROM16(bios->data[offset + 2]); - bitentry.offset = ROM16(bios->data[offset + 4]); - + if (bit_table(dev, table->id, &bitentry) == 0) return table->parse_fn(dev, bios, &bitentry); - } NV_INFO(dev, "BIT table '%c' not found\n", table->id); return -ENOSYS; @@ -6752,6 +6756,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev) bit_signature, sizeof(bit_signature)); if (offset) { NV_TRACE(dev, "BIT BIOS found\n"); + bios->type = NVBIOS_BIT; + bios->offset = offset; return parse_bit_structure(bios, offset + 6); } @@ -6759,6 +6765,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev) bmp_signature, sizeof(bmp_signature)); if (offset) { NV_TRACE(dev, "BMP BIOS found\n"); + bios->type = NVBIOS_BMP; + bios->offset = offset; return parse_bmp_structure(dev, bios, offset); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 02c5dd09ba7f..50a648e01c49 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -34,6 +34,20 @@ #define DCB_LOC_ON_CHIP 0 +#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x)) +#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x)) +#define ROMPTR(bios, x) (ROM16(x) ? 
&(bios)->data[ROM16(x)] : NULL) + +struct bit_entry { + uint8_t id; + uint8_t version; + uint16_t length; + uint16_t offset; + uint8_t *data; +}; + +int bit_table(struct drm_device *, u8 id, struct bit_entry *); + struct dcb_i2c_entry { uint32_t entry; uint8_t port_type; @@ -224,6 +238,11 @@ struct pll_lims { struct nvbios { struct drm_device *dev; + enum { + NVBIOS_BMP, + NVBIOS_BIT + } type; + uint16_t offset; uint8_t chip_version; From 330c5988ee78045e6a731c3693251aaa5b0d14e3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Sep 2010 15:39:49 +1000 Subject: [PATCH 267/476] drm/nouveau: import initial work on vbios performance table parsing Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 1 + drivers/gpu/drm/nouveau/nouveau_drv.h | 46 +++++ drivers/gpu/drm/nouveau/nouveau_perf.c | 159 ++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_pm.c | 214 ++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_pm.h | 44 +++++ drivers/gpu/drm/nouveau/nouveau_state.c | 5 + drivers/gpu/drm/nouveau/nouveau_volt.c | 209 +++++++++++++++++++++++ 7 files changed, 678 insertions(+) create mode 100644 drivers/gpu/drm/nouveau/nouveau_perf.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_pm.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_pm.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_volt.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index d6cfbf259876..2fd61888a83d 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -10,6 +10,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ nouveau_dp.o nouveau_ramht.o \ + nouveau_pm.o nouveau_volt.o nouveau_perf.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d2fecc05eae4..bda4d1e7c63a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -359,6 +359,51 @@ struct nouveau_gpio_engine { void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); }; +struct nouveau_pm_voltage_level { + u8 voltage; + u8 vid; +}; + +struct nouveau_pm_voltage { + bool supported; + u8 vid_mask; + + struct nouveau_pm_voltage_level *level; + int nr_level; +}; + +#define NOUVEAU_PM_MAX_LEVEL 8 +struct nouveau_pm_level { + struct device_attribute dev_attr; + char name[32]; + int id; + + u32 core; + u32 memory; + u32 shader; + u32 unk05; + + u8 voltage; + u8 fanspeed; +}; + +struct nouveau_pm_engine { + struct nouveau_pm_voltage voltage; + struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; + int nr_perflvl; + + struct nouveau_pm_level boot; + struct nouveau_pm_level *cur; + + int (*clock_get)(struct drm_device *, u32 id); + void *(*clock_pre)(struct drm_device *, u32 id, int khz); + void (*clock_set)(struct drm_device *, void *); + int (*voltage_get)(struct drm_device *); + int (*voltage_set)(struct drm_device *, int voltage); + int (*fanspeed_get)(struct drm_device *); + int (*fanspeed_set)(struct drm_device *, int fanspeed); +}; + struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -368,6 +413,7 @@ struct nouveau_engine { struct nouveau_fifo_engine fifo; struct nouveau_display_engine display; struct nouveau_gpio_engine gpio; + struct nouveau_pm_engine pm; }; struct nouveau_pll_vals { 
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c new file mode 100644 index 000000000000..a882a366487f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -0,0 +1,159 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +void +nouveau_perf_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 version, headerlen, recordlen, entries; + u8 *perf, *entry; + int vid, i; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version != 1 && P.version != 2) { + NV_WARN(dev, "unknown perf for BIT P %d\n", P.version); + return; + } + + perf = ROMPTR(bios, P.data[0]); + version = perf[0]; + headerlen = perf[1]; + if (version < 0x40) { + recordlen = perf[3] + (perf[4] * perf[5]); + entries = perf[2]; + } else { + recordlen = perf[2] + (perf[3] * perf[4]); + entries = perf[5]; + } + } else { + if (bios->data[bios->offset + 6] < 0x27) { + NV_DEBUG(dev, "BMP version too old for perf\n"); + return; + } + + perf = ROMPTR(bios, bios->data[bios->offset + 0x94]); + if (!perf) { + NV_DEBUG(dev, "perf table pointer invalid\n"); + return; + } + + version = perf[1]; + headerlen = perf[0]; + recordlen = perf[3]; + entries = perf[2]; + } + + entry = perf + headerlen; + for (i = 0; i < entries; i++) { + struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; + + if (entry[0] == 0xff) { + entry += recordlen; + continue; + } + + switch (version) { + case 0x12: + case 0x13: + case 0x15: + perflvl->fanspeed = entry[55]; + perflvl->voltage = entry[56]; + perflvl->core = ROM32(entry[1]) / 100; + perflvl->memory = ROM32(entry[5]) / 100; + break; + case 0x21: + case 0x23: + case 0x24: + perflvl->fanspeed = entry[4]; + perflvl->voltage = entry[5]; + perflvl->core = ROM16(entry[6]); + perflvl->memory = ROM16(entry[11]); + break; + case 0x25: + perflvl->fanspeed = entry[4]; + perflvl->voltage = entry[5]; + perflvl->core = ROM16(entry[6]); + perflvl->shader = ROM16(entry[10]); + perflvl->memory = ROM16(entry[12]); + break; + case 0x30: + case 0x35: + perflvl->fanspeed = entry[6]; + perflvl->voltage = entry[7]; + perflvl->core = ROM16(entry[8]); + perflvl->shader = ROM16(entry[10]); + perflvl->memory = 
ROM16(entry[12]); + /*XXX: confirm on 0x35 */ + perflvl->unk05 = ROM16(entry[16]); + break; + case 0x40: +#define subent(n) entry[perf[2] + ((n) * perf[3])] + perflvl->fanspeed = 0; /*XXX*/ + perflvl->voltage = 0; /*XXX: entry[2] */; + perflvl->core = ROM16(subent(0)) & 0xfff; + perflvl->shader = ROM16(subent(1)) & 0xfff; + perflvl->memory = ROM16(subent(2)) & 0xfff; + break; + } + + /* convert MHz -> KHz, it's more convenient */ + perflvl->core *= 1000; + perflvl->memory *= 1000; + perflvl->shader *= 1000; + perflvl->unk05 *= 1000; + + /* make sure vid is valid */ + if (pm->voltage.supported && perflvl->voltage) { + vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); + if (vid < 0) { + NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); + entry += recordlen; + continue; + } + } + + snprintf(perflvl->name, sizeof(perflvl->name), + "performance_level_%d", i); + perflvl->id = i; + pm->nr_perflvl++; + + entry += recordlen; + } +} + +void +nouveau_perf_fini(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c new file mode 100644 index 000000000000..9cf5fd665b8c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -0,0 +1,214 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +static int +nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + int ret; + + if (!pm->clock_get) + return -EINVAL; + + memset(perflvl, 0, sizeof(*perflvl)); + + ret = pm->clock_get(dev, PLL_CORE); + if (ret > 0) + perflvl->core = ret; + + ret = pm->clock_get(dev, PLL_MEMORY); + if (ret > 0) + perflvl->memory = ret; + + ret = pm->clock_get(dev, PLL_SHADER); + if (ret > 0) + perflvl->shader = ret; + + ret = pm->clock_get(dev, PLL_UNK05); + if (ret > 0) + perflvl->unk05 = ret; + + if (pm->voltage.supported && pm->voltage_get) { + ret = pm->voltage_get(dev); + if (ret > 0) + perflvl->voltage = ret; + } + + return 0; +} + +static void +nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) +{ + char s[16], v[16], f[16]; + + s[0] = '\0'; + if (perflvl->shader) + snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); + + v[0] = '\0'; + if (perflvl->voltage) + snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10); + + f[0] = '\0'; + if (perflvl->fanspeed) + snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); + + snprintf(ptr, len, "core %dMHz memory %dMHz%s%s%s\n", + perflvl->core / 1000, perflvl->memory / 1000, s, v, f); +} + +static ssize_t +nouveau_pm_get_perflvl_info(struct device *d, + struct device_attribute *a, char *buf) +{ + struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a; + char *ptr = buf; + int len = PAGE_SIZE; + + snprintf(ptr, len, "%d: ", perflvl->id); + ptr += strlen(buf); + len -= strlen(buf); + + nouveau_pm_perflvl_info(perflvl, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level cur; + int len = PAGE_SIZE, ret; + char *ptr = buf; + + if (!pm->cur) + snprintf(ptr, len, "setting: boot\n"); + else if (pm->cur == &pm->boot) + snprintf(ptr, len, "setting: boot\nc: "); + else + snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id); + ptr += strlen(buf); + len -= strlen(buf); + + ret = nouveau_pm_perflvl_get(dev, &cur); + if (ret == 0) + nouveau_pm_perflvl_info(&cur, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + return -EPERM; +} + +DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, + nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); + +int +nouveau_pm_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *d = &dev->pdev->dev; + char info[256]; + int ret, i; + + nouveau_volt_init(dev); + nouveau_perf_init(dev); + + NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); + for (i = 0; i < pm->nr_perflvl; i++) { + nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); + NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); + } + + /* determine current ("boot") performance level */ + ret = nouveau_pm_perflvl_get(dev, &pm->boot); + if (ret == 0) { + pm->cur = &pm->boot; + + nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); + NV_INFO(dev, "c: %s", 
info); + } + + /* initialise sysfs */ + ret = device_create_file(d, &dev_attr_performance_level); + if (ret) + return ret; + + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *perflvl = &pm->perflvl[i]; + + perflvl->dev_attr.attr.name = perflvl->name; + perflvl->dev_attr.attr.mode = S_IRUGO; + perflvl->dev_attr.show = nouveau_pm_get_perflvl_info; + perflvl->dev_attr.store = NULL; + sysfs_attr_init(&perflvl->dev_attr.attr); + + ret = device_create_file(d, &perflvl->dev_attr); + if (ret) { + NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n", + perflvl->id, i); + perflvl->dev_attr.attr.name = NULL; + nouveau_pm_fini(dev); + return ret; + } + } + + return 0; +} + +void +nouveau_pm_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *d = &dev->pdev->dev; + int i; + + device_remove_file(d, &dev_attr_performance_level); + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *pl = &pm->perflvl[i]; + + if (!pl->dev_attr.attr.name) + break; + + device_remove_file(d, &pl->dev_attr); + } + + nouveau_perf_fini(dev); + nouveau_volt_fini(dev); +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h new file mode 100644 index 000000000000..a401ec0a1269 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -0,0 +1,44 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_PM_H__ +#define __NOUVEAU_PM_H__ + +/* nouveau_pm.c */ +int nouveau_pm_init(struct drm_device *dev); +void nouveau_pm_fini(struct drm_device *dev); + +/* nouveau_volt.c */ +void nouveau_volt_init(struct drm_device *); +void nouveau_volt_fini(struct drm_device *); +int nouveau_volt_vid_lookup(struct drm_device *, int voltage); +int nouveau_volt_lvl_lookup(struct drm_device *, int vid); +int nouveau_voltage_gpio_get(struct drm_device *); +int nouveau_voltage_gpio_set(struct drm_device *, int voltage); + +/* nouveau_perf.c */ +void nouveau_perf_init(struct drm_device *); +void nouveau_perf_fini(struct drm_device *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index be859604cf64..18c4a8a85940 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -36,6 +36,7 @@ #include "nouveau_drm.h" #include "nouveau_fbcon.h" #include "nouveau_ramht.h" +#include "nouveau_pm.h" #include "nv50_display.h" static void nouveau_stub_takedown(struct drm_device *dev) {} @@ -527,6 +528,8 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_display_early; + nouveau_pm_init(dev); + ret = nouveau_mem_vram_init(dev); if (ret) goto out_bios; @@ -635,6 +638,7 @@ out_gpuobj: out_vram: nouveau_mem_vram_fini(dev); out_bios: + nouveau_pm_fini(dev); nouveau_bios_takedown(dev); out_display_early: engine->display.late_takedown(dev); @@ -677,6 +681,7 @@ static void nouveau_card_takedown(struct drm_device *dev) drm_irq_uninstall(dev); + nouveau_pm_fini(dev); nouveau_bios_takedown(dev); vga_client_register(dev->pdev, NULL, NULL, NULL); diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c new file mode 100644 index 000000000000..6ce857688eb6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_volt.c @@ -0,0 +1,209 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a }; +static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); + +int +nouveau_voltage_gpio_get(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + u8 vid = 0; + int i; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + vid |= gpio->get(dev, vidtag[i]) << i; + } + + return nouveau_volt_lvl_lookup(dev, vid); +} + +int +nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int vid, i; + + vid = nouveau_volt_vid_lookup(dev, voltage); + if (vid < 0) + return vid; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + gpio->set(dev, vidtag[i], !!(vid & (1 << i))); + } + + return 0; +} + +int +nouveau_volt_vid_lookup(struct drm_device *dev, int voltage) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].voltage == voltage) + return volt->level[i].vid; + } + + return -ENOENT; +} + +int +nouveau_volt_lvl_lookup(struct drm_device *dev, int vid) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].vid == vid) + return volt->level[i].voltage; + } + + return -ENOENT; +} + +void +nouveau_volt_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_voltage *voltage = &pm->voltage; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 *volt = NULL, *entry; + int i, recordlen, entries, vidmask, vidshift; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + volt = ROMPTR(bios, P.data[16]); + else + if (P.version == 2) + volt = ROMPTR(bios, P.data[12]); + else { + NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); + } + } else { + if (bios->data[bios->offset + 6] < 0x27) { + NV_DEBUG(dev, "BMP version too old for voltage\n"); + return; + } + + volt = ROMPTR(bios, bios->data[bios->offset + 0x98]); + } + + if (!volt) { + NV_DEBUG(dev, "voltage table pointer invalid\n"); + return; + } + + switch (volt[0]) { + case 0x10: + case 0x11: + case 0x12: + recordlen = 5; + entries = volt[2]; + vidshift = 0; + vidmask = volt[4]; + break; + case 0x20: + recordlen = volt[3]; + entries = volt[2]; + vidshift = 0; /* could be vidshift like 0x30? 
*/ + vidmask = volt[5]; + break; + case 0x30: + recordlen = volt[2]; + entries = volt[3]; + vidshift = hweight8(volt[5]); + vidmask = volt[4]; + break; + default: + NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); + return; + } + + /* validate vid mask */ + voltage->vid_mask = vidmask; + if (!voltage->vid_mask) + return; + + i = 0; + while (vidmask) { + if (i > nr_vidtag) { + NV_DEBUG(dev, "vid bit %d unknown\n", i); + return; + } + + if (!nouveau_bios_gpio_entry(dev, vidtag[i])) { + NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); + return; + } + + vidmask >>= 1; + i++; + } + + /* parse vbios entries into common format */ + voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); + if (!voltage->level) + return; + + entry = volt + volt[1]; + for (i = 0; i < entries; i++, entry += recordlen) { + voltage->level[i].voltage = entry[0]; + voltage->level[i].vid = entry[1] >> vidshift; + } + voltage->nr_level = entries; + voltage->supported = true; +} + +void +nouveau_volt_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + + kfree(volt->level); +} From 02c30ca0a1d6d8b878fc32f47b3b25192ef4a8ef Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Sep 2010 16:17:35 +1000 Subject: [PATCH 268/476] drm/nv50: import initial clock get/set routines + hook up pm engine This will make nouveau_pm attempt to report the card's current performance level both during bootup, and through sysfs. This is a very initial implementation, and can be improved a *lot* Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 3 +- drivers/gpu/drm/nouveau/nouveau_pm.h | 5 + drivers/gpu/drm/nouveau/nouveau_state.c | 5 + drivers/gpu/drm/nouveau/nv50_pm.c | 126 ++++++++++++++++++++++++ 4 files changed, 138 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/nouveau/nv50_pm.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 2fd61888a83d..c5319901e95c 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -24,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ - nv50_calc.o + nv50_calc.o \ + nv50_pm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index a401ec0a1269..81d27722964b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -41,4 +41,9 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage); void nouveau_perf_init(struct drm_device *); void nouveau_perf_fini(struct drm_device *); +/* nv50_pm.c */ +int nv50_pm_clock_get(struct drm_device *, u32 id); +void *nv50_pm_clock_pre(struct drm_device *, u32 id, int khz); +void nv50_pm_clock_set(struct drm_device *, void *); + #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 18c4a8a85940..bbe9ba015bca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -355,6 +355,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; + engine->pm.clock_get = nv50_pm_clock_get; 
+ engine->pm.clock_pre = nv50_pm_clock_pre; + engine->pm.clock_set = nv50_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; break; case 0xC0: engine->instmem.init = nvc0_instmem_init; diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c new file mode 100644 index 000000000000..a616e4240346 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -0,0 +1,126 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +/*XXX: boards using limits 0x40 need fixing, the register layout + * is correct here, but, there's some other funny magic + * that modifies things, so it's not likely we'll set/read + * the correct timings yet.. working on it... 
+ */ + +struct nv50_pm_state { + struct pll_lims pll; + enum pll_types type; + int N, M, P; +}; + +int +nv50_pm_clock_get(struct drm_device *dev, u32 id) +{ + struct pll_lims pll; + int P, N, M, ret; + u32 reg0, reg1; + + ret = get_pll_limits(dev, id, &pll); + if (ret) + return ret; + + if (pll.vco2.maxfreq) { + reg0 = nv_rd32(dev, pll.reg + 0); + reg1 = nv_rd32(dev, pll.reg + 4); + P = (reg0 & 0x00070000) >> 16; + N = (reg1 & 0x0000ff00) >> 8; + M = (reg1 & 0x000000ff); + + return ((pll.refclk * N / M) >> P); + } + + reg0 = nv_rd32(dev, pll.reg + 4); + P = (reg0 & 0x003f0000) >> 16; + N = (reg0 & 0x0000ff00) >> 8; + M = (reg0 & 0x000000ff); + return pll.refclk * N / M / P; +} + +void * +nv50_pm_clock_pre(struct drm_device *dev, u32 id, int khz) +{ + struct nv50_pm_state *state; + int dummy, ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + state->type = id; + + ret = get_pll_limits(dev, id, &state->pll); + if (ret < 0) { + kfree(state); + return ERR_PTR(ret); + } + + ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M, + &dummy, &dummy, &state->P); + if (ret < 0) { + kfree(state); + return ERR_PTR(ret); + } + + return state; +} + +void +nv50_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct nv50_pm_state *state = pre_state; + u32 reg = state->pll.reg, tmp; + int N = state->N; + int M = state->M; + int P = state->P; + + if (state->pll.vco2.maxfreq) { + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x100210, 0); + nv_wr32(dev, 0x1002dc, 1); + } + + tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff; + tmp |= 0x80000000 | (P << 16); + nv_wr32(dev, reg + 0, tmp); + nv_wr32(dev, reg + 4, (N << 8) | M); + + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x1002dc, 0); + nv_wr32(dev, 0x100210, 0x80000000); + } + } else { + nv_wr32(dev, reg + 4, (P << 16) | (N << 8) | M); + } + + kfree(state); +} + From 442b626ece6fbbe7f52c03a09f85ae5755f29eab Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Sep 2010 16:25:26 +1000 Subject: [PATCH 269/476] drm/nv04-nv40: import initial pm backend Currently just hooked up to the already-existing nouveau_hw, which should handle all relevant chipsets as well as we currently can. This will likely be eventually split out and improved into chipset specific code at a later point. 
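The nv04 backend follows the same three-step contract as the nv50 code above: clock_get() reads the current frequency, clock_pre() validates a target clock and returns an opaque state object, and clock_set() commits that state and frees it. A sketch of how the pm core is expected to drive any backend (hypothetical helper, not part of this patch):

/* Hypothetical illustration of the clock_pre()/clock_set() contract;
 * clock_set() also frees the state object returned by clock_pre(). */
static int nouveau_pm_set_clock(struct drm_device *dev, u32 id, int khz)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	void *state;

	state = pm->clock_pre(dev, id, khz);
	if (IS_ERR(state))
		return PTR_ERR(state);

	pm->clock_set(dev, state);
	return 0;
}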
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_hw.c | 8 ++- drivers/gpu/drm/nouveau/nouveau_pm.h | 5 ++ drivers/gpu/drm/nouveau/nouveau_state.c | 19 ++++++ drivers/gpu/drm/nouveau/nv04_pm.c | 79 +++++++++++++++++++++++++ 5 files changed, 110 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nv04_pm.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index c5319901e95c..3cedabeb1617 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -25,7 +25,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ - nv50_pm.o + nv04_pm.o nv50_pm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index e228aafc03e0..ebcf8a8190c2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -431,7 +431,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, struct pll_lims pll_lim; int ret; - BUG_ON(reg1 == 0); + if (reg1 == 0) + return -ENOENT; pll1 = nvReadMC(dev, reg1); @@ -480,6 +481,7 @@ int nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) { struct nouveau_pll_vals pllvals; + int ret; if (plltype == PLL_MEMORY && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { @@ -499,7 +501,9 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) return clock; } - nouveau_hw_get_pllvals(dev, plltype, &pllvals); + ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); + if (ret) + return ret; return nouveau_hw_pllvals_to_clk(&pllvals); } diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 81d27722964b..70e1862572f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -41,6 +41,11 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage); void nouveau_perf_init(struct drm_device *); void nouveau_perf_fini(struct drm_device *); +/* nv04_pm.c */ +int nv04_pm_clock_get(struct drm_device *, u32 id); +void *nv04_pm_clock_pre(struct drm_device *, u32 id, int khz); +void nv04_pm_clock_set(struct drm_device *, void *); + /* nv50_pm.c */ int nv50_pm_clock_get(struct drm_device *, u32 id); void *nv50_pm_clock_pre(struct drm_device *, u32 id, int khz); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index bbe9ba015bca..f9f77de6bbc0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -96,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = NULL; engine->gpio.set = NULL; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x10: engine->instmem.init = nv04_instmem_init; @@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x20: engine->instmem.init = nv04_instmem_init; @@ -198,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct 
drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x30: engine->instmem.init = nv04_instmem_init; @@ -249,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; break; case 0x40: case 0x60: @@ -301,6 +315,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. */ diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c new file mode 100644 index 000000000000..35c200eb476c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -0,0 +1,79 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +struct nv04_pm_state { + struct pll_lims pll; + struct nouveau_pll_vals calc; +}; + +int +nv04_pm_clock_get(struct drm_device *dev, u32 id) +{ + return nouveau_hw_get_clock(dev, id); +} + +void * +nv04_pm_clock_pre(struct drm_device *dev, u32 id, int khz) +{ + struct nv04_pm_state *state; + int ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + ret = get_pll_limits(dev, id, &state->pll); + if (ret) { + kfree(state); + return ERR_PTR(ret); + } + + ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc); + if (!ret) { + kfree(state); + return ERR_PTR(-EINVAL); + } + + return state; +} + +void +nv04_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_pm_state *state = pre_state; + u32 reg = state->pll.reg; + + /* thank the insane nouveau_hw_setpll() interface for this */ + if (dev_priv->card_type >= NV_40) + reg += 4; + + nouveau_hw_setpll(dev, reg, &state->calc); + kfree(state); +} + From 6f876986bedf23b40ab707543e88fae7eac27f1f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Sep 2010 16:47:14 +1000 Subject: [PATCH 270/476] drm/nouveau: allow static performance level setting Guarded by a module parameter for the moment, read the code for the magic value which enables it. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 5 +- drivers/gpu/drm/nouveau/nouveau_drv.c | 8 +++ drivers/gpu/drm/nouveau/nouveau_drv.h | 2 + drivers/gpu/drm/nouveau/nouveau_pm.c | 89 +++++++++++++++++++++++++- drivers/gpu/drm/nouveau/nv04_pm.c | 2 +- drivers/gpu/drm/nouveau/nv50_pm.c | 2 +- 6 files changed, 104 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index ef44070321e6..07171dd3c166 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -4832,8 +4832,11 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims */ if (limit_match > PLL_MAX) pll_lim->reg = limit_match; - else + else { pll_lim->reg = get_pll_register(dev, limit_match); + if (!pll_lim->reg) + return -ENOENT; + } if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) { uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex]; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 14a4960a989a..b03bb6d5b987 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -102,6 +102,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" int nouveau_reg_debug; module_param_named(reg_debug, nouveau_reg_debug, int, 0600); +MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n"); +char *nouveau_perflvl; +module_param_named(perflvl, nouveau_perflvl, charp, 0400); + +MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); +int nouveau_perflvl_wr; +module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); + int nouveau_fbpercrtc; #if 0 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bda4d1e7c63a..8d36ed6907d1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -716,6 +716,8 @@ extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int 
nouveau_noaccel; extern int nouveau_override_conntype; +extern char *nouveau_perflvl; +extern int nouveau_perflvl_wr; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9cf5fd665b8c..9e8e14eb6df3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -27,6 +27,78 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +static int +nouveau_pm_clock_set(struct drm_device *dev, u8 id, u32 khz) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + void *pre_state; + + if (khz == 0) + return 0; + + pre_state = pm->clock_pre(dev, id, khz); + if (IS_ERR(pre_state)) + return PTR_ERR(pre_state); + + if (pre_state) + pm->clock_set(dev, pre_state); + return 0; +} + +static int +nouveau_pm_profile_set(struct drm_device *dev, const char *profile) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level *perflvl = NULL; + int ret; + + /* safety precaution, for now */ + if (nouveau_perflvl_wr != 7777) + return -EPERM; + + if (!pm->clock_set) + return -EINVAL; + + if (!strncmp(profile, "boot", 4)) + perflvl = &pm->boot; + else { + int pl = simple_strtol(profile, NULL, 10); + int i; + + for (i = 0; i < pm->nr_perflvl; i++) { + if (pm->perflvl[i].id == pl) { + perflvl = &pm->perflvl[i]; + break; + } + } + + if (!perflvl) + return -EINVAL; + } + + if (perflvl == pm->cur) + return 0; + + NV_INFO(dev, "setting performance level: %s\n", profile); + if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { + ret = pm->voltage_set(dev, perflvl->voltage); + if (ret) { + NV_ERROR(dev, "voltage_set %d failed: %d\n", + perflvl->voltage, ret); + } + } + + nouveau_pm_clock_set(dev, PLL_CORE, perflvl->core); + nouveau_pm_clock_set(dev, PLL_SHADER, perflvl->shader); + nouveau_pm_clock_set(dev, PLL_MEMORY, perflvl->memory); + nouveau_pm_clock_set(dev, PLL_UNK05, perflvl->unk05); + + pm->cur = perflvl; + return 0; +} + static int nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) { @@ -130,7 +202,13 @@ static ssize_t nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, const char *buf, size_t count) { - return -EPERM; + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + int ret; + + ret = nouveau_pm_profile_set(dev, buf); + if (ret) + return ret; + return strlen(buf); } DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, @@ -163,6 +241,15 @@ nouveau_pm_init(struct drm_device *dev) NV_INFO(dev, "c: %s", info); } + /* switch performance levels now if requested */ + if (nouveau_perflvl != NULL) { + ret = nouveau_pm_profile_set(dev, nouveau_perflvl); + if (ret) { + NV_ERROR(dev, "error setting perflvl \"%s\": %d\n", + nouveau_perflvl, ret); + } + } + /* initialise sysfs */ ret = device_create_file(d, &dev_attr_performance_level); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 35c200eb476c..15e4b9029df8 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -50,7 +50,7 @@ nv04_pm_clock_pre(struct drm_device *dev, u32 id, int khz) ret = get_pll_limits(dev, id, &state->pll); if (ret) { kfree(state); - return ERR_PTR(ret); + return (ret == -ENOENT) ? 
NULL : ERR_PTR(ret); } ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc); diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c index a616e4240346..64bc29c39c3a 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -80,7 +80,7 @@ nv50_pm_clock_pre(struct drm_device *dev, u32 id, int khz) ret = get_pll_limits(dev, id, &state->pll); if (ret < 0) { kfree(state); - return ERR_PTR(ret); + return (ret == -ENOENT) ? NULL : ERR_PTR(ret); } ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M, From 64f1c11a477cb76e1572ee0793234739e045b3d5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 17 Sep 2010 13:35:25 +1000 Subject: [PATCH 271/476] drm/nouveau: restore perflvl on resume, and restore boot perflvl on unload Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 3 ++ drivers/gpu/drm/nouveau/nouveau_pm.c | 64 +++++++++++++++++++-------- drivers/gpu/drm/nouveau/nouveau_pm.h | 1 + 3 files changed, 49 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index b03bb6d5b987..f919e411e39a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -31,6 +31,7 @@ #include "nouveau_hw.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_pm.h" #include "nv50_display.h" #include "drm_pciids.h" @@ -279,6 +280,8 @@ nouveau_pci_resume(struct pci_dev *pdev) if (ret) return ret; + nouveau_pm_resume(dev); + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ret = nouveau_mem_init_agp(dev); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9e8e14eb6df3..4e92d215f05d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -46,13 +46,39 @@ nouveau_pm_clock_set(struct drm_device *dev, u8 id, u32 khz) return 0; } +static int +nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + int ret; + + if (perflvl == pm->cur) + return 0; + + if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { + ret = pm->voltage_set(dev, perflvl->voltage); + if (ret) { + NV_ERROR(dev, "voltage_set %d failed: %d\n", + perflvl->voltage, ret); + } + } + + nouveau_pm_clock_set(dev, PLL_CORE, perflvl->core); + nouveau_pm_clock_set(dev, PLL_SHADER, perflvl->shader); + nouveau_pm_clock_set(dev, PLL_MEMORY, perflvl->memory); + nouveau_pm_clock_set(dev, PLL_UNK05, perflvl->unk05); + + pm->cur = perflvl; + return 0; +} + static int nouveau_pm_profile_set(struct drm_device *dev, const char *profile) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level *perflvl = NULL; - int ret; /* safety precaution, for now */ if (nouveau_perflvl_wr != 7777) @@ -78,25 +104,8 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile) return -EINVAL; } - if (perflvl == pm->cur) - return 0; - NV_INFO(dev, "setting performance level: %s\n", profile); - if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { - ret = pm->voltage_set(dev, perflvl->voltage); - if (ret) { - NV_ERROR(dev, "voltage_set %d failed: %d\n", - perflvl->voltage, ret); - } - } - - nouveau_pm_clock_set(dev, PLL_CORE, perflvl->core); - nouveau_pm_clock_set(dev, PLL_SHADER, perflvl->shader); - 
nouveau_pm_clock_set(dev, PLL_MEMORY, perflvl->memory); - nouveau_pm_clock_set(dev, PLL_UNK05, perflvl->unk05); - - pm->cur = perflvl; - return 0; + return nouveau_pm_perflvl_set(dev, perflvl); } static int @@ -285,6 +294,9 @@ nouveau_pm_fini(struct drm_device *dev) struct device *d = &dev->pdev->dev; int i; + if (pm->cur != &pm->boot) + nouveau_pm_perflvl_set(dev, &pm->boot); + device_remove_file(d, &dev_attr_performance_level); for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *pl = &pm->perflvl[i]; @@ -299,3 +311,17 @@ nouveau_pm_fini(struct drm_device *dev) nouveau_volt_fini(dev); } +void +nouveau_pm_resume(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level *perflvl; + + if (pm->cur == &pm->boot) + return; + + perflvl = pm->cur; + pm->cur = &pm->boot; + nouveau_pm_perflvl_set(dev, perflvl); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 70e1862572f8..f3de5a68c41f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -28,6 +28,7 @@ /* nouveau_pm.c */ int nouveau_pm_init(struct drm_device *dev); void nouveau_pm_fini(struct drm_device *dev); +void nouveau_pm_resume(struct drm_device *dev); /* nouveau_volt.c */ void nouveau_volt_init(struct drm_device *); From e022878571690e09e965e8c6bfc837b3dc5b6b74 Mon Sep 17 00:00:00 2001 From: Phil Turmel Date: Tue, 14 Sep 2010 20:14:11 -0400 Subject: [PATCH 272/476] drm/nouveau: Fix build regression, undefined reference to `acpi_video_get_edid' Build breakage: drivers/built-in.o: In function `nouveau_acpi_edid': (.text+0x13404e): undefined reference to `acpi_video_get_edid' make: *** [.tmp_vmlinux1] Error 1 Introduced by: a6ed76d7ffc62ffa474b41d31b011b6853c5de32 is the first bad commit commit a6ed76d7ffc62ffa474b41d31b011b6853c5de32 Author: Ben Skeggs Date: Mon Jul 12 15:33:07 2010 +1000 drm/nouveau: support fetching LVDS EDID from ACPI Based on a patch from Matthew Garrett. Signed-off-by: Ben Skeggs Acked-by: Matthew Garrett It doesn't seem to revert cleanly, but the problem lies in these two config entries: CONFIG_ACPI=y CONFIG_ACPI_VIDEO=m Adding a select for ACPI_VIDEO appears to be the best solution, and is comparable to what is done in DRM_I915. Builds, boots, and appears to work correctly. Signed-off-by: Philip J. Turmel Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index d2d28048efb2..72730e9ca06c 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -10,6 +10,7 @@ config DRM_NOUVEAU select FB select FRAMEBUFFER_CONSOLE if !EMBEDDED select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT + select ACPI_VIDEO if ACPI help Choose this option for open-source nVidia support. 
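
[Editor's note, not part of the patch series: the performance-level interface added in the patches above is exposed through the perflvl= and perflvl_wr= module parameters and a performance_level sysfs attribute created on the card's PCI device. The following is a minimal user-space sketch of driving it. It assumes nouveau was loaded with perflvl_wr=7777 (the safety magic checked in nouveau_pm_profile_set()) and that the attribute is reachable through the usual /sys/class/drm/card0/device symlink to the PCI device; the card index and the exact path are assumptions, not taken from the patches.]

/*
 * Minimal user-space sketch (not part of the patch series) exercising the
 * performance_level sysfs attribute added above.  Assumes nouveau was
 * loaded with perflvl_wr=7777 and that card0 is the nouveau device; both
 * the path and the card index are assumptions.
 */
#include <stdio.h>

#define PERFLVL_ATTR "/sys/class/drm/card0/device/performance_level"

int main(void)
{
	FILE *f;
	char cur[256];

	/* Request a static level: "boot" or a numeric performance level id. */
	f = fopen(PERFLVL_ATTR, "w");
	if (!f) {
		perror("open " PERFLVL_ATTR " for writing");
		return 1;
	}
	if (fputs("boot", f) < 0)
		perror("write performance level");
	fclose(f);

	/* Read back what the driver reports as the current level. */
	f = fopen(PERFLVL_ATTR, "r");
	if (!f) {
		perror("open " PERFLVL_ATTR " for reading");
		return 1;
	}
	if (fgets(cur, sizeof(cur), f))
		printf("performance_level: %s", cur);
	fclose(f);
	return 0;
}

[The same level string can instead be passed at module load time via the perflvl= parameter, which nouveau_pm_init() applies before it registers the sysfs attribute.]
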
From 07b1266962085412e85af2e7df471ec9ed0c35f5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sat, 18 Sep 2010 22:13:04 +1000 Subject: [PATCH 273/476] drm/nouveau: fix potential accuracy loss when parsing perf 0x1c tables Reported-by: Roy Spliet Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_perf.c | 34 +++++++++++--------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index a882a366487f..a397420e46c6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -90,50 +90,44 @@ nouveau_perf_init(struct drm_device *dev) case 0x15: perflvl->fanspeed = entry[55]; perflvl->voltage = entry[56]; - perflvl->core = ROM32(entry[1]) / 100; - perflvl->memory = ROM32(entry[5]) / 100; + perflvl->core = ROM32(entry[1]) * 10; + perflvl->memory = ROM32(entry[5]) * 10; break; case 0x21: case 0x23: case 0x24: perflvl->fanspeed = entry[4]; perflvl->voltage = entry[5]; - perflvl->core = ROM16(entry[6]); - perflvl->memory = ROM16(entry[11]); + perflvl->core = ROM16(entry[6]) * 1000; + perflvl->memory = ROM16(entry[11]) * 1000; break; case 0x25: perflvl->fanspeed = entry[4]; perflvl->voltage = entry[5]; - perflvl->core = ROM16(entry[6]); - perflvl->shader = ROM16(entry[10]); - perflvl->memory = ROM16(entry[12]); + perflvl->core = ROM16(entry[6]) * 1000; + perflvl->shader = ROM16(entry[10]) * 1000; + perflvl->memory = ROM16(entry[12]) * 1000; break; case 0x30: case 0x35: perflvl->fanspeed = entry[6]; perflvl->voltage = entry[7]; - perflvl->core = ROM16(entry[8]); - perflvl->shader = ROM16(entry[10]); - perflvl->memory = ROM16(entry[12]); + perflvl->core = ROM16(entry[8]) * 1000; + perflvl->shader = ROM16(entry[10]) * 1000; + perflvl->memory = ROM16(entry[12]) * 1000; /*XXX: confirm on 0x35 */ - perflvl->unk05 = ROM16(entry[16]); + perflvl->unk05 = ROM16(entry[16]) * 1000; break; case 0x40: #define subent(n) entry[perf[2] + ((n) * perf[3])] perflvl->fanspeed = 0; /*XXX*/ perflvl->voltage = 0; /*XXX: entry[2] */; - perflvl->core = ROM16(subent(0)) & 0xfff; - perflvl->shader = ROM16(subent(1)) & 0xfff; - perflvl->memory = ROM16(subent(2)) & 0xfff; + perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; + perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; + perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; break; } - /* convert MHz -> KHz, it's more convenient */ - perflvl->core *= 1000; - perflvl->memory *= 1000; - perflvl->shader *= 1000; - perflvl->unk05 *= 1000; - /* make sure vid is valid */ if (pm->voltage.supported && perflvl->voltage) { vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); From e49f70f775335ab1ee0ecd54904d2b25982a2600 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 20 Sep 2010 10:06:50 +1000 Subject: [PATCH 274/476] drm/nouveau: implement parsing of DCB 2.2 GPIO table Found on NV3x boards, this should allow voltage modifications to work on these chipsets. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 156 ++++++++++--------------- 1 file changed, 63 insertions(+), 93 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 07171dd3c166..8d60bfdf6c74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -5762,8 +5762,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) static struct dcb_gpio_entry * new_gpio_entry(struct nvbios *bios) { + struct drm_device *dev = bios->dev; struct dcb_gpio_table *gpio = &bios->dcb.gpio; + if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) { + NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n"); + return NULL; + } + return &gpio->entry[gpio->entries++]; } @@ -5784,114 +5790,78 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) return NULL; } -static void -parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) -{ - struct dcb_gpio_entry *gpio; - uint16_t ent = ROM16(bios->data[offset]); - uint8_t line = ent & 0x1f, - tag = ent >> 5 & 0x3f, - flags = ent >> 11 & 0x1f; - - if (tag == 0x3f) - return; - - gpio = new_gpio_entry(bios); - - gpio->tag = tag; - gpio->line = line; - gpio->invert = flags != 4; - gpio->entry = ent; -} - -static void -parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) -{ - uint32_t entry = ROM32(bios->data[offset]); - struct dcb_gpio_entry *gpio; - - if ((entry & 0x0000ff00) == 0x0000ff00) - return; - - gpio = new_gpio_entry(bios); - gpio->tag = (entry & 0x0000ff00) >> 8; - gpio->line = (entry & 0x0000001f) >> 0; - gpio->state_default = (entry & 0x01000000) >> 24; - gpio->state[0] = (entry & 0x18000000) >> 27; - gpio->state[1] = (entry & 0x60000000) >> 29; - gpio->entry = entry; -} - static void parse_dcb_gpio_table(struct nvbios *bios) { struct drm_device *dev = bios->dev; - uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr; - uint8_t *gpio_table = &bios->data[gpio_table_ptr]; - int header_len = gpio_table[1], - entries = gpio_table[2], - entry_len = gpio_table[3]; - void (*parse_entry)(struct nvbios *, uint16_t) = NULL; + struct dcb_gpio_entry *e; + u8 headerlen, entries, recordlen; + u8 *dcb, *gpio = NULL, *entry; int i; - if (bios->dcb.version >= 0x40) { - if (gpio_table_ptr && entry_len != 4) { - NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); - return; - } + dcb = ROMPTR(bios, bios->data[0x36]); + if (dcb[0] >= 0x30) { + gpio = ROMPTR(bios, dcb[10]); + if (!gpio) + goto no_table; - parse_entry = parse_dcb40_gpio_entry; + headerlen = gpio[1]; + entries = gpio[2]; + recordlen = gpio[3]; + } else + if (dcb[0] >= 0x22) { + gpio = ROMPTR(bios, dcb[-15]); + if (!gpio) + goto no_table; - } else if (bios->dcb.version >= 0x30) { - if (gpio_table_ptr && entry_len != 2) { - NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); - return; - } - - parse_entry = parse_dcb30_gpio_entry; - - } else if (bios->dcb.version >= 0x22) { - /* - * DCBs older than v3.0 don't really have a GPIO - * table, instead they keep some GPIO info at fixed - * locations. 
- */ - uint16_t dcbptr = ROM16(bios->data[0x36]); - uint8_t *tvdac_gpio = &bios->data[dcbptr - 5]; - - if (tvdac_gpio[0] & 1) { - struct dcb_gpio_entry *gpio = new_gpio_entry(bios); - - gpio->tag = DCB_GPIO_TVDAC0; - gpio->line = tvdac_gpio[1] >> 4; - gpio->invert = tvdac_gpio[0] & 2; - } + headerlen = 3; + entries = gpio[2]; + recordlen = gpio[1]; } else { - /* - * No systematic way to store GPIO info on pre-v2.2 - * DCBs, try to match the PCI device IDs. - */ + NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]); + goto no_table; + } - /* Apple iMac G4 NV18 */ - if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) { - struct dcb_gpio_entry *gpio = new_gpio_entry(bios); + entry = gpio + headerlen; + for (i = 0; i < entries; i++, entry += recordlen) { + e = new_gpio_entry(bios); + if (!e) + break; - gpio->tag = DCB_GPIO_TVDAC0; - gpio->line = 4; + if (gpio[0] < 0x40) { + e->entry = ROM16(entry[0]); + e->tag = (e->entry & 0x07e0) >> 5; + if (e->tag == 0x3f) { + bios->dcb.gpio.entries--; + continue; + } + + e->line = (e->entry & 0x001f); + e->invert = ((e->entry & 0xf800) >> 11) != 4; + } else { + e->entry = ROM32(entry[0]); + e->tag = (e->entry & 0x0000ff00) >> 8; + if (e->tag == 0xff) { + bios->dcb.gpio.entries--; + continue; + } + + e->line = (e->entry & 0x0000001f) >> 0; + e->state_default = (e->entry & 0x01000000) >> 24; + e->state[0] = (e->entry & 0x18000000) >> 27; + e->state[1] = (e->entry & 0x60000000) >> 29; } - } - if (!gpio_table_ptr) - return; - - if (entries > DCB_MAX_NUM_GPIO_ENTRIES) { - NV_WARN(dev, "Too many entries in the DCB GPIO table.\n"); - entries = DCB_MAX_NUM_GPIO_ENTRIES; +no_table: + /* Apple iMac G4 NV18 */ + if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) { + e = new_gpio_entry(bios); + if (e) { + e->tag = DCB_GPIO_TVDAC0; + e->line = 4; + } } - - for (i = 0; i < entries; i++) - parse_entry(bios, gpio_table_ptr + header_len + entry_len * i); } struct dcb_connector_table_entry * From dc7339df11bc7a90268929e57e13a14951343cc3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 20 Sep 2010 10:38:30 +1000 Subject: [PATCH 275/476] drm/nouveau: fix thinko in volt 0x1x parsing Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_volt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c index 6ce857688eb6..04fdc00a67d5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_volt.c +++ b/drivers/gpu/drm/nouveau/nouveau_volt.c @@ -110,7 +110,7 @@ nouveau_volt_init(struct drm_device *dev) struct nvbios *bios = &dev_priv->vbios; struct bit_entry P; u8 *volt = NULL, *entry; - int i, recordlen, entries, vidmask, vidshift; + int i, headerlen, recordlen, entries, vidmask, vidshift; if (bios->type == NVBIOS_BIT) { if (bit_table(dev, 'P', &P)) @@ -142,18 +142,21 @@ nouveau_volt_init(struct drm_device *dev) case 0x10: case 0x11: case 0x12: - recordlen = 5; + headerlen = 5; + recordlen = volt[1]; entries = volt[2]; vidshift = 0; vidmask = volt[4]; break; case 0x20: + headerlen = volt[1]; recordlen = volt[3]; entries = volt[2]; vidshift = 0; /* could be vidshift like 0x30? 
*/ vidmask = volt[5]; break; case 0x30: + headerlen = volt[1]; recordlen = volt[2]; entries = volt[3]; vidshift = hweight8(volt[5]); @@ -190,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev) if (!voltage->level) return; - entry = volt + volt[1]; + entry = volt + headerlen; for (i = 0; i < entries; i++, entry += recordlen) { voltage->level[i].voltage = entry[0]; voltage->level[i].vid = entry[1] >> vidshift; From 038b8b2a0dd8a0760d086f0c90af656b242369e3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 20 Sep 2010 18:27:39 +1000 Subject: [PATCH 276/476] drm/nv50: flush bar1 vm / dma object setup before poking 0x1708 Should fix issues noticed on NVAC (MacBook Pro / ION) since gpuobj rework. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_instmem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index bb73c67e23b5..f5800f21a9dc 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -226,6 +226,8 @@ nv50_instmem_init(struct drm_device *dev) nv_wo32(priv->fb_bar, 0x10, 0x00000000); nv_wo32(priv->fb_bar, 0x14, 0x00000000); + dev_priv->engine.instmem.flush(dev); + nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); From 0fbb114af7ea63227599460c412fb8796556a169 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 20 Sep 2010 16:18:28 +0200 Subject: [PATCH 277/476] drm/nouveau: Parse old style perf tables. Used on nv17-nv28, they contain memory clocks and timings, only one of the table entries can actually be used, depending on the RAMCFG straps, and it's usually higher than the frequency programmed on boot by the BIOS. The memory timings listed in table version 0x1x are used to init the 0x12xx range but they aren't required for reclocking to work. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_perf.c | 47 +++++++++++++++++++++++++- drivers/gpu/drm/nouveau/nouveau_pm.c | 10 ++++-- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index a397420e46c6..00f8243c6c73 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -27,6 +27,51 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +static void +legacy_perf_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + char *perf, *entry, *bmp = &bios->data[bios->offset]; + int headerlen, use_straps; + + if (bmp[5] < 0x5 || bmp[6] < 0x14) { + NV_DEBUG(dev, "BMP version too old for perf\n"); + return; + } + + perf = ROMPTR(bios, bmp[0x73]); + if (!perf) { + NV_DEBUG(dev, "No memclock table pointer found.\n"); + return; + } + + switch (perf[0]) { + case 0x12: + case 0x14: + case 0x18: + use_straps = 0; + headerlen = 1; + break; + case 0x01: + use_straps = perf[1] & 1; + headerlen = (use_straps ? 
8 : 2); + break; + default: + NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]); + return; + } + + entry = perf + headerlen; + if (use_straps) + entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; + + sprintf(pm->perflvl[0].name, "performance_level_0"); + pm->perflvl[0].memory = ROM16(entry[0]) * 20; + pm->nr_perflvl = 1; +} + void nouveau_perf_init(struct drm_device *dev) { @@ -59,7 +104,7 @@ nouveau_perf_init(struct drm_device *dev) } } else { if (bios->data[bios->offset + 6] < 0x27) { - NV_DEBUG(dev, "BMP version too old for perf\n"); + legacy_perf_init(dev); return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 4e92d215f05d..a07f27447cf9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -148,7 +148,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) static void nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) { - char s[16], v[16], f[16]; + char c[16], s[16], v[16], f[16]; + + c[0] = '\0'; + if (perflvl->core) + snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000); s[0] = '\0'; if (perflvl->shader) @@ -162,8 +166,8 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) if (perflvl->fanspeed) snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); - snprintf(ptr, len, "core %dMHz memory %dMHz%s%s%s\n", - perflvl->core / 1000, perflvl->memory / 1000, s, v, f); + snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000, + c, s, v, f); } static ssize_t From 64d202b4d78968979c0d44306854d41f9b71626d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 21 Sep 2010 12:10:51 +1000 Subject: [PATCH 278/476] drm/nouveau: correct INIT_DP_CONDITION subcondition 5 Fixes DP output on a GTX 465 board I have. 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8d60bfdf6c74..7b7b1e27ed0b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -1222,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) return 3; } - if (cond & 1) + if (!(cond & 1)) iexec->execute = false; } break; From 34e9d85a1aae28b090ec4e72a0f98a5483c198c4 Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Wed, 22 Sep 2010 20:54:22 +0200 Subject: [PATCH 279/476] drm/nouveau: Add temperature support (vbios parsing, readings, hwmon) Signed-off-by: Martin Peres Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 18 ++ drivers/gpu/drm/nouveau/nouveau_pm.c | 259 +++++++++++++++++++---- drivers/gpu/drm/nouveau/nouveau_pm.h | 6 + drivers/gpu/drm/nouveau/nouveau_temp.c | 272 +++++++++++++++++++++++++ 5 files changed, 518 insertions(+), 39 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_temp.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 3cedabeb1617..bdbde726778e 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -25,7 +25,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ - nv04_pm.o nv50_pm.o + nv04_pm.o nv50_pm.o nouveau_temp.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d36ed6907d1..3fc5596df360 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -387,10 +387,26 @@ struct nouveau_pm_level { u8 fanspeed; }; +struct nouveau_pm_temp_sensor_constants { + u16 offset_constant; + s16 offset_mult; + u16 offset_div; + u16 slope_mult; + u16 slope_div; +}; + +struct nouveau_pm_threshold_temp { + s16 critical; + s16 down_clock; + s16 fan_boost; +}; + struct nouveau_pm_engine { struct nouveau_pm_voltage voltage; struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; int nr_perflvl; + struct nouveau_pm_temp_sensor_constants sensor_constants; + struct nouveau_pm_threshold_temp threshold_temp; struct nouveau_pm_level boot; struct nouveau_pm_level *cur; @@ -663,6 +679,8 @@ struct drm_nouveau_private { struct nouveau_fbdev *nfbdev; struct apertures_struct *apertures; + + struct device *int_hwmon_dev; }; static inline struct drm_nouveau_private * diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index a07f27447cf9..09b638435f8f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -27,6 +27,9 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +#include +#include + static int nouveau_pm_clock_set(struct drm_device *dev, u8 id, u32 khz) { @@ -189,7 +192,7 @@ nouveau_pm_get_perflvl_info(struct device *d, static ssize_t nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) { - struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level cur; 
@@ -215,7 +218,7 @@ static ssize_t nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, const char *buf, size_t count) { - struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); int ret; ret = nouveau_pm_profile_set(dev, buf); @@ -227,43 +230,14 @@ nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); -int -nouveau_pm_init(struct drm_device *dev) +static int +nouveau_sysfs_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; - char info[256]; int ret, i; - nouveau_volt_init(dev); - nouveau_perf_init(dev); - - NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); - for (i = 0; i < pm->nr_perflvl; i++) { - nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); - NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); - } - - /* determine current ("boot") performance level */ - ret = nouveau_pm_perflvl_get(dev, &pm->boot); - if (ret == 0) { - pm->cur = &pm->boot; - - nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); - NV_INFO(dev, "c: %s", info); - } - - /* switch performance levels now if requested */ - if (nouveau_perflvl != NULL) { - ret = nouveau_pm_profile_set(dev, nouveau_perflvl); - if (ret) { - NV_ERROR(dev, "error setting perflvl \"%s\": %d\n", - nouveau_perflvl, ret); - } - } - - /* initialise sysfs */ ret = device_create_file(d, &dev_attr_performance_level); if (ret) return ret; @@ -290,17 +264,14 @@ nouveau_pm_init(struct drm_device *dev) return 0; } -void -nouveau_pm_fini(struct drm_device *dev) +static void +nouveau_sysfs_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *d = &dev->pdev->dev; int i; - if (pm->cur != &pm->boot) - nouveau_pm_perflvl_set(dev, &pm->boot); - device_remove_file(d, &dev_attr_performance_level); for (i = 0; i < pm->nr_perflvl; i++) { struct nouveau_pm_level *pl = &pm->perflvl[i]; @@ -310,9 +281,221 @@ nouveau_pm_fini(struct drm_device *dev) device_remove_file(d, &pl->dev_attr); } +} + + + +static ssize_t +nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + + return snprintf(buf, PAGE_SIZE, "%d\n", nouveau_temp_get(dev)*1000); +} +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, + NULL, 0); + +static ssize_t +nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); +} +static ssize_t +nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + long value; + + if (strict_strtoul(buf, 10, &value) == -EINVAL) + return count; + + temp->down_clock = value/1000; + + nouveau_temp_safety_checks(dev); + + return count; +} +static 
SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp, + nouveau_hwmon_set_max_temp, + 0); + +static ssize_t +nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); +} +static ssize_t +nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + long value; + + if (strict_strtoul(buf, 10, &value) == -EINVAL) + return count; + + temp->critical = value/1000; + + nouveau_temp_safety_checks(dev); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + nouveau_hwmon_critical_temp, + nouveau_hwmon_set_critical_temp, + 0); + +static ssize_t nouveau_hwmon_show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "nouveau\n"); +} +static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0); + +static ssize_t nouveau_hwmon_show_update_rate(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "1000\n"); +} +static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO, + nouveau_hwmon_show_update_rate, + NULL, 0); + +static struct attribute *hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; + +static const struct attribute_group hwmon_attrgroup = { + .attrs = hwmon_attributes, +}; + +static int +nouveau_hwmon_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct device *hwmon_dev; + int ret; + + dev_priv->int_hwmon_dev = NULL; + + hwmon_dev = hwmon_device_register(&dev->pdev->dev); + if (IS_ERR(hwmon_dev)) { + ret = PTR_ERR(hwmon_dev); + NV_ERROR(dev, + "Unable to register hwmon device: %d\n", ret); + return ret; + } + dev_set_drvdata(hwmon_dev, dev); + ret = sysfs_create_group(&hwmon_dev->kobj, + &hwmon_attrgroup); + if (ret) { + NV_ERROR(dev, + "Unable to create hwmon sysfs file: %d\n", ret); + hwmon_device_unregister(hwmon_dev); + return ret; + } + + dev_priv->int_hwmon_dev = hwmon_dev; + + return 0; +} + +static void +nouveau_hwmon_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->int_hwmon_dev) { + sysfs_remove_group(&dev_priv->int_hwmon_dev->kobj, + &hwmon_attrgroup); + hwmon_device_unregister(dev_priv->int_hwmon_dev); + } +} + + +int +nouveau_pm_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + char info[256]; + int ret, i; + + nouveau_volt_init(dev); + nouveau_perf_init(dev); + nouveau_temp_init(dev); + + NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); + for (i = 0; i < pm->nr_perflvl; i++) { + nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); + NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); + } + + /* determine current ("boot") performance 
level */ + ret = nouveau_pm_perflvl_get(dev, &pm->boot); + if (ret == 0) { + pm->cur = &pm->boot; + + nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); + NV_INFO(dev, "c: %s", info); + } + + /* switch performance levels now if requested */ + if (nouveau_perflvl != NULL) { + ret = nouveau_pm_profile_set(dev, nouveau_perflvl); + if (ret) { + NV_ERROR(dev, "error setting perflvl \"%s\": %d\n", + nouveau_perflvl, ret); + } + } + + nouveau_sysfs_init(dev); + nouveau_hwmon_init(dev); + + return 0; +} + +void +nouveau_pm_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + + if (pm->cur != &pm->boot) + nouveau_pm_perflvl_set(dev, &pm->boot); nouveau_perf_fini(dev); nouveau_volt_fini(dev); + nouveau_temp_fini(dev); + + nouveau_hwmon_fini(dev); + nouveau_sysfs_fini(dev); } void diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index f3de5a68c41f..d048b7516b1c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -52,4 +52,10 @@ int nv50_pm_clock_get(struct drm_device *, u32 id); void *nv50_pm_clock_pre(struct drm_device *, u32 id, int khz); void nv50_pm_clock_set(struct drm_device *, void *); +/* nouveau_temp.c */ +void nouveau_temp_init(struct drm_device *dev); +void nouveau_temp_fini(struct drm_device *dev); +void nouveau_temp_safety_checks(struct drm_device *dev); +int16_t nouveau_temp_get(struct drm_device *dev); + #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c new file mode 100644 index 000000000000..3394075e4c3c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -0,0 +1,272 @@ +/* + * Copyright 2010 PathScale inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +void +nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp; + int i, headerlen, recordlen, entries; + + if (!temp) { + NV_DEBUG(dev, "temperature table pointer invalid\n"); + return; + } + + /* Set the default sensor's contants */ + sensor->offset_constant = 0; + sensor->offset_mult = 1; + sensor->offset_div = 1; + sensor->slope_mult = 1; + sensor->slope_div = 1; + + /* Set the default temperature thresholds */ + temps->critical = 110; + temps->down_clock = 100; + temps->fan_boost = 90; + + /* Set the known default values to setup the temperature sensor */ + if (dev_priv->card_type >= NV_40) { + switch (dev_priv->chipset) { + case 0x43: + sensor->offset_mult = 32060; + sensor->offset_div = 1000; + sensor->slope_mult = 792; + sensor->slope_div = 1000; + break; + + case 0x44: + case 0x47: + sensor->offset_mult = 27839; + sensor->offset_div = 1000; + sensor->slope_mult = 780; + sensor->slope_div = 1000; + break; + + case 0x46: + sensor->offset_mult = -24775; + sensor->offset_div = 100; + sensor->slope_mult = 467; + sensor->slope_div = 10000; + break; + + case 0x49: + sensor->offset_mult = -25051; + sensor->offset_div = 100; + sensor->slope_mult = 458; + sensor->slope_div = 10000; + break; + + case 0x4b: + sensor->offset_mult = -24088; + sensor->offset_div = 100; + sensor->slope_mult = 442; + sensor->slope_div = 10000; + break; + + case 0x50: + sensor->offset_mult = -22749; + sensor->offset_div = 100; + sensor->slope_mult = 431; + sensor->slope_div = 10000; + break; + } + } + + headerlen = temp[1]; + recordlen = temp[2]; + entries = temp[3]; + temp = temp + headerlen; + + /* Read the entries from the table */ + for (i = 0; i < entries; i++) { + u16 value = ROM16(temp[1]); + + switch (temp[0]) { + case 0x01: + value = (value&0x8f) == 0 ? 
(value >> 9) & 0x7f : 0; + sensor->offset_constant = value; + break; + + case 0x04: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->critical = (value&0x0ff0) >> 4; + break; + + case 0x07: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->down_clock = (value&0x0ff0) >> 4; + break; + + case 0x08: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->fan_boost = (value&0x0ff0) >> 4; + break; + + case 0x10: + sensor->offset_mult = value; + break; + + case 0x11: + sensor->offset_div = value; + break; + + case 0x12: + sensor->slope_mult = value; + break; + + case 0x13: + sensor->slope_div = value; + break; + } + temp += recordlen; + } + + nouveau_temp_safety_checks(dev); +} + +static s16 +nouveau_nv40_sensor_setup(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + u32 offset = sensor->offset_mult / sensor->offset_div; + u32 sensor_calibration; + + /* set up the sensors */ + sensor_calibration = 120 - offset - sensor->offset_constant; + sensor_calibration = sensor_calibration * sensor->slope_div / + sensor->slope_mult; + + if (dev_priv->chipset >= 0x46) + sensor_calibration |= 0x80000000; + else + sensor_calibration |= 0x10000000; + + nv_wr32(dev, 0x0015b0, sensor_calibration); + + /* Wait for the sensor to update */ + msleep(5); + + /* read */ + return nv_rd32(dev, 0x0015b4); +} + +s16 +nouveau_temp_get(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + + if (dev_priv->chipset >= 0x84) { + return nv_rd32(dev, 0x20400); + } else if (dev_priv->chipset >= 0x40) { + u32 offset = sensor->offset_mult / sensor->offset_div; + u32 core_temp; + + if (dev_priv->chipset >= 0x50) { + core_temp = nv_rd32(dev, 0x20008); + } else { + core_temp = nv_rd32(dev, 0x0015b4); + /* Setup the sensor if the temperature is 0 */ + if (core_temp == 0) + core_temp = nouveau_nv40_sensor_setup(dev); + } + + core_temp = core_temp * sensor->slope_mult / sensor->slope_div; + core_temp = core_temp + offset + sensor->offset_constant; + + return core_temp; + } else { + NV_ERROR(dev, + "Temperature cannot be retrieved from an nv%x card\n", + dev_priv->chipset); + return 0; + } + + return 0; +} + +void +nouveau_temp_safety_checks(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp; + + if (temps->critical > 120) + temps->critical = 120; + else if (temps->critical < 80) + temps->critical = 80; + + if (temps->down_clock > 110) + temps->down_clock = 110; + else if (temps->down_clock < 60) + temps->down_clock = 60; + + if (temps->fan_boost > 100) + temps->fan_boost = 100; + else if (temps->fan_boost < 40) + temps->fan_boost = 40; +} + +void +nouveau_temp_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 *temp = NULL; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + temp = ROMPTR(bios, P.data[12]); + else if (P.version == 2) + temp = ROMPTR(bios, P.data[16]); + else + NV_WARN(dev, "unknown temp for BIT P %d\n", P.version); + } else { + NV_WARN(dev, "BMP entry unknown for temperature 
table.\n"); + } + + nouveau_temp_vbios_parse(dev, temp); +} + +void +nouveau_temp_fini(struct drm_device *dev) +{ + +} From 5f7d42ece30104dd05307f0a004c9d45895582fb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Sep 2010 14:45:52 +1000 Subject: [PATCH 280/476] drm/nouveau: add debugfs file to forcibly evict everything from vram Very useful for debugging buffer migration issues. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7933de4aff2e..8e1592368cce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data) return 0; } +static int +nouveau_debugfs_evict_vram(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private; + int ret; + + ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + if (ret) + seq_printf(m, "failed: %d", ret); + else + seq_printf(m, "succeeded\n"); + return 0; +} + static struct drm_info_list nouveau_debugfs_list[] = { + { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL }, { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, { "memory", nouveau_debugfs_memory_info, 0, NULL }, { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, From 5220b3c14b5d4e35df5a4574534519d7007713c3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Sep 2010 15:21:17 +1000 Subject: [PATCH 281/476] drm/nv50: assume smaller tiles for bo moves Somehow fixes some corruption seen in KDE.. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2b97d97f1493..c900aaa5cffa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -529,8 +529,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, while (length) { u32 amount, stride, height; - amount = min(length, (u64)(16 * 1024 * 1024)); - stride = 64 * 4; + amount = min(length, (u64)(4 * 1024 * 1024)); + stride = 16 * 4; height = amount / stride; if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { @@ -540,7 +540,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); OUT_RING (chan, 0); - OUT_RING (chan, 0x20); + OUT_RING (chan, 0); OUT_RING (chan, stride); OUT_RING (chan, height); OUT_RING (chan, 1); @@ -561,7 +561,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); OUT_RING (chan, 0); - OUT_RING (chan, 0x20); + OUT_RING (chan, 0); OUT_RING (chan, stride); OUT_RING (chan, height); OUT_RING (chan, 1); From 9bb5863ab4c2bad487e9d02fabde60e858c22411 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Sep 2010 15:23:16 +1000 Subject: [PATCH 282/476] drm/nouveau: fix chipset vs card_type thinko Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c900aaa5cffa..2148e2d73de3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -912,7 +912,7 @@ 
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) * nothing to do here. */ if (bo->mem.mem_type != TTM_PL_VRAM) { - if (dev_priv->chipset < NV_50 || !nvbo->tile_flags) + if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) return 0; } From 80dad869a3bc30b4e6c3ae527f94abbe9bb497f5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Sep 2010 16:37:39 +1000 Subject: [PATCH 283/476] drm/nouveau: fix panels using straps-based mode detection nouveau_bios_fp_mode() zeroes the mode struct before filling in relevant entries. This nukes the mode id initialised by drm_mode_create(), and causes warnings from idr when we try to remove the mode. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 98c214c34922..4b286a8c30c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -594,8 +594,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) if (nv_encoder->dcb->type == OUTPUT_LVDS && (nv_encoder->dcb->lvdsconf.use_straps_for_mode || dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { - nv_connector->native_mode = drm_mode_create(dev); - nouveau_bios_fp_mode(dev, nv_connector->native_mode); + struct drm_display_mode mode; + + nouveau_bios_fp_mode(dev, &mode); + nv_connector->native_mode = drm_mode_duplicate(dev, &mode); } /* Find the native mode if this is a digital panel, if we didn't From bb338bb6f230aa70e70dc0d27af1b7a8efc34cdd Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 21 Sep 2010 19:03:19 +0200 Subject: [PATCH 284/476] drm/nv10: Don't oops if the card wants to switch to a channel with no grctx. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv10_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index b2f6a57c0cc5..8e68c9731159 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; chan = dev_priv->fifos[chid]; - if (chan) + if (chan && chan->pgraph_ctx) nv10_graph_load_context(chan); pgraph->fifo_access(dev, true); From 5e6a74436e378eb021a74f3e5f329eecf49d928e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 22 Sep 2010 23:25:00 +0200 Subject: [PATCH 285/476] drm/nouveau: Don't try to parse a GPIO table on early DCBv2.2 BIOSes. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 7b7b1e27ed0b..8fc2ba164ef2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -5809,7 +5809,7 @@ parse_dcb_gpio_table(struct nvbios *bios) entries = gpio[2]; recordlen = gpio[3]; } else - if (dcb[0] >= 0x22) { + if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) { gpio = ROMPTR(bios, dcb[-15]); if (!gpio) goto no_table; @@ -5817,6 +5817,19 @@ parse_dcb_gpio_table(struct nvbios *bios) headerlen = 3; entries = gpio[2]; recordlen = gpio[1]; + } else + if (dcb[0] >= 0x22) { + /* No GPIO table present, parse the TVDAC GPIO data. 
*/ + uint8_t *tvdac_gpio = &dcb[-5]; + + if (tvdac_gpio[0] & 1) { + e = new_gpio_entry(bios); + e->tag = DCB_GPIO_TVDAC0; + e->line = tvdac_gpio[1] >> 4; + e->invert = tvdac_gpio[0] & 2; + } + + goto no_table; } else { NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]); goto no_table; From 4164743c8249ef2867cf71ae2cb2380b5a550b06 Mon Sep 17 00:00:00 2001 From: Francesco Marella Date: Thu, 23 Sep 2010 09:14:22 +0200 Subject: [PATCH 286/476] drm/nv40: fix reading temp value Signed-off-by: Francesco Marella Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_temp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 3394075e4c3c..3e0f19e1c72f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -178,7 +178,7 @@ nouveau_nv40_sensor_setup(struct drm_device *dev) msleep(5); /* read */ - return nv_rd32(dev, 0x0015b4); + return nv_rd32(dev, 0x0015b4) & 0x1fff; } s16 @@ -197,7 +197,7 @@ nouveau_temp_get(struct drm_device *dev) if (dev_priv->chipset >= 0x50) { core_temp = nv_rd32(dev, 0x20008); } else { - core_temp = nv_rd32(dev, 0x0015b4); + core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; /* Setup the sensor if the temperature is 0 */ if (core_temp == 0) core_temp = nouveau_nv40_sensor_setup(dev); From d34ec507d40faab908968985b3106869d8d8fbcf Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 16:27:14 +0200 Subject: [PATCH 287/476] drm/nouveau: Add sane sensor correction defaults for nv4a. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 3e0f19e1c72f..54e3d9222f79 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -65,6 +65,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) case 0x44: case 0x47: + case 0x4a: sensor->offset_mult = 27839; sensor->offset_div = 1000; sensor->slope_mult = 780; From 67e1d4fbaefd6a27b55523bb6bb0dd941351a325 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 17:01:05 +0200 Subject: [PATCH 288/476] drm/nouveau: Fix parsing of the temperature constant correction. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_temp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 54e3d9222f79..86b170a851be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -113,8 +113,8 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) switch (temp[0]) { case 0x01: - value = (value&0x8f) == 0 ? (value >> 9) & 0x7f : 0; - sensor->offset_constant = value; + if ((value & 0x8f) == 0) + sensor->offset_constant = (value >> 9) & 0x7f; break; case 0x04: From e829d804d78c57b8e90039079284ac585f72851d Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 15:34:09 +0200 Subject: [PATCH 289/476] drm/nouveau: Double the perf table memory clocks on pre-G71 cards. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_perf.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 00f8243c6c73..6b641b69cb77 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -136,7 +136,7 @@ nouveau_perf_init(struct drm_device *dev) perflvl->fanspeed = entry[55]; perflvl->voltage = entry[56]; perflvl->core = ROM32(entry[1]) * 10; - perflvl->memory = ROM32(entry[5]) * 10; + perflvl->memory = ROM32(entry[5]) * 20; break; case 0x21: case 0x23: @@ -144,7 +144,13 @@ nouveau_perf_init(struct drm_device *dev) perflvl->fanspeed = entry[4]; perflvl->voltage = entry[5]; perflvl->core = ROM16(entry[6]) * 1000; - perflvl->memory = ROM16(entry[11]) * 1000; + + if (dev_priv->chipset == 0x49 || + dev_priv->chipset == 0x4b) + perflvl->memory = ROM16(entry[11]) * 1000; + else + perflvl->memory = ROM16(entry[11]) * 2000; + break; case 0x25: perflvl->fanspeed = entry[4]; From 8155cac489eb8cc6fd96b9bdefacdf5a56e6ea32 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 20:58:38 +0200 Subject: [PATCH 290/476] drm/nouveau: Refactor nouveau_temp_get() into engine pointers. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 5 ++- drivers/gpu/drm/nouveau/nouveau_pm.c | 19 ++++++---- drivers/gpu/drm/nouveau/nouveau_pm.h | 3 +- drivers/gpu/drm/nouveau/nouveau_state.c | 5 +++ drivers/gpu/drm/nouveau/nouveau_temp.c | 50 +++++++++++-------------- 5 files changed, 43 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3fc5596df360..799cd149745d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -411,6 +411,8 @@ struct nouveau_pm_engine { struct nouveau_pm_level boot; struct nouveau_pm_level *cur; + struct device *hwmon; + int (*clock_get)(struct drm_device *, u32 id); void *(*clock_pre)(struct drm_device *, u32 id, int khz); void (*clock_set)(struct drm_device *, void *); @@ -418,6 +420,7 @@ struct nouveau_pm_engine { int (*voltage_set)(struct drm_device *, int voltage); int (*fanspeed_get)(struct drm_device *); int (*fanspeed_set)(struct drm_device *, int fanspeed); + int (*temp_get)(struct drm_device *); }; struct nouveau_engine { @@ -679,8 +682,6 @@ struct drm_nouveau_private { struct nouveau_fbdev *nfbdev; struct apertures_struct *apertures; - - struct device *int_hwmon_dev; }; static inline struct drm_nouveau_private * diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 09b638435f8f..85a56dea0ef7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -289,8 +289,10 @@ static ssize_t nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; - return snprintf(buf, PAGE_SIZE, "%d\n", nouveau_temp_get(dev)*1000); + return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); @@ -399,10 +401,12 @@ static int nouveau_hwmon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct 
device *hwmon_dev; int ret; - dev_priv->int_hwmon_dev = NULL; + if (!pm->temp_get) + return -ENODEV; hwmon_dev = hwmon_device_register(&dev->pdev->dev); if (IS_ERR(hwmon_dev)) { @@ -421,7 +425,7 @@ nouveau_hwmon_init(struct drm_device *dev) return ret; } - dev_priv->int_hwmon_dev = hwmon_dev; + pm->hwmon = hwmon_dev; return 0; } @@ -430,15 +434,14 @@ static void nouveau_hwmon_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; - if (dev_priv->int_hwmon_dev) { - sysfs_remove_group(&dev_priv->int_hwmon_dev->kobj, - &hwmon_attrgroup); - hwmon_device_unregister(dev_priv->int_hwmon_dev); + if (pm->hwmon) { + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); + hwmon_device_unregister(pm->hwmon); } } - int nouveau_pm_init(struct drm_device *dev) { diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index d048b7516b1c..6ad0ca9db88f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -56,6 +56,7 @@ void nv50_pm_clock_set(struct drm_device *, void *); void nouveau_temp_init(struct drm_device *dev); void nouveau_temp_fini(struct drm_device *dev); void nouveau_temp_safety_checks(struct drm_device *dev); -int16_t nouveau_temp_get(struct drm_device *dev); +int nv40_temp_get(struct drm_device *dev); +int nv84_temp_get(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f9f77de6bbc0..affcfc2fae19 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -320,6 +320,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; + engine->pm.temp_get = nv40_temp_get; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. 
*/ @@ -379,6 +380,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv50_pm_clock_set; engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; + if (dev_priv->chipset >= 0x84) + engine->pm.temp_get = nv84_temp_get; + else + engine->pm.temp_get = nv40_temp_get; break; case 0xC0: engine->instmem.init = nvc0_instmem_init; diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 86b170a851be..2f7785ca4e48 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -154,8 +154,8 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) nouveau_temp_safety_checks(dev); } -static s16 -nouveau_nv40_sensor_setup(struct drm_device *dev) +static int +nv40_sensor_setup(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; @@ -182,40 +182,34 @@ nouveau_nv40_sensor_setup(struct drm_device *dev) return nv_rd32(dev, 0x0015b4) & 0x1fff; } -s16 -nouveau_temp_get(struct drm_device *dev) +int +nv40_temp_get(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + int offset = sensor->offset_mult / sensor->offset_div; + int core_temp; - if (dev_priv->chipset >= 0x84) { - return nv_rd32(dev, 0x20400); - } else if (dev_priv->chipset >= 0x40) { - u32 offset = sensor->offset_mult / sensor->offset_div; - u32 core_temp; - - if (dev_priv->chipset >= 0x50) { - core_temp = nv_rd32(dev, 0x20008); - } else { - core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; - /* Setup the sensor if the temperature is 0 */ - if (core_temp == 0) - core_temp = nouveau_nv40_sensor_setup(dev); - } - - core_temp = core_temp * sensor->slope_mult / sensor->slope_div; - core_temp = core_temp + offset + sensor->offset_constant; - - return core_temp; + if (dev_priv->chipset >= 0x50) { + core_temp = nv_rd32(dev, 0x20008); } else { - NV_ERROR(dev, - "Temperature cannot be retrieved from an nv%x card\n", - dev_priv->chipset); - return 0; + core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; + /* Setup the sensor if the temperature is 0 */ + if (core_temp == 0) + core_temp = nv40_sensor_setup(dev); } - return 0; + core_temp = core_temp * sensor->slope_mult / sensor->slope_div; + core_temp = core_temp + offset + sensor->offset_constant; + + return core_temp; +} + +int +nv84_temp_get(struct drm_device *dev) +{ + return nv_rd32(dev, 0x20400); } void From 66146da06643d8ee89bc5255fb0254006e3d0e79 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 21:00:40 +0200 Subject: [PATCH 291/476] drm/nouveau: Add support for I2C hardware monitoring devices. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_i2c.c | 8 +++-- drivers/gpu/drm/nouveau/nouveau_i2c.h | 5 ++- drivers/gpu/drm/nouveau/nouveau_temp.c | 48 ++++++++++++++++++++++++-- drivers/gpu/drm/nouveau/nv04_dfp.c | 2 +- drivers/gpu/drm/nouveau/nv04_tv.c | 4 +-- 5 files changed, 58 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 84614858728b..fdd7e3de79c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr) int nouveau_i2c_identify(struct drm_device *dev, const char *what, - struct i2c_board_info *info, int index) + struct i2c_board_info *info, + bool (*match)(struct nouveau_i2c_chan *, + struct i2c_board_info *), + int index) { struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); int i; @@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); for (i = 0; info[i].addr; i++) { - if (nouveau_probe_i2c_addr(i2c, info[i].addr)) { + if (nouveau_probe_i2c_addr(i2c, info[i].addr) && + (!match || match(i2c, &info[i]))) { NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); return i; } diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h index f71cb32f7571..c77a6ba66b7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.h +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h @@ -44,7 +44,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); int nouveau_i2c_identify(struct drm_device *dev, const char *what, - struct i2c_board_info *info, int index); + struct i2c_board_info *info, + bool (*match)(struct nouveau_i2c_chan *, + struct i2c_board_info *), + int index); extern const struct i2c_algorithm nouveau_dp_i2c_algo; diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 2f7785ca4e48..f9eda87d1773 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -235,6 +235,48 @@ nouveau_temp_safety_checks(struct drm_device *dev) temps->fan_boost = 40; } +static bool +probe_monitoring_device(struct nouveau_i2c_chan *i2c, + struct i2c_board_info *info) +{ + char modalias[16] = "i2c:"; + struct i2c_client *client; + + strlcat(modalias, info->type, sizeof(modalias)); + request_module(modalias); + + client = i2c_new_device(&i2c->adapter, info); + if (!client) + return false; + + if (!client->driver || client->driver->detect(client, info)) { + i2c_unregister_device(client); + return false; + } + + return true; +} + +static void +nouveau_temp_probe_i2c(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_table *dcb = &dev_priv->vbios.dcb; + struct i2c_board_info info[] = { + { I2C_BOARD_INFO("w83l785ts", 0x2d) }, + { I2C_BOARD_INFO("w83781d", 0x2d) }, + { I2C_BOARD_INFO("f75375", 0x2e) }, + { I2C_BOARD_INFO("adt7473", 0x2e) }, + { I2C_BOARD_INFO("lm99", 0x4c) }, + { } + }; + int idx = (dcb->version >= 0x40 ? 
+ dcb->i2c_default_indices & 0xf : 2); + + nouveau_i2c_identify(dev, "monitoring device", info, + probe_monitoring_device, idx); +} + void nouveau_temp_init(struct drm_device *dev) { @@ -253,11 +295,11 @@ nouveau_temp_init(struct drm_device *dev) temp = ROMPTR(bios, P.data[16]); else NV_WARN(dev, "unknown temp for BIT P %d\n", P.version); - } else { - NV_WARN(dev, "BMP entry unknown for temperature table.\n"); + + nouveau_temp_vbios_parse(dev, temp); } - nouveau_temp_vbios_parse(dev, temp); + nouveau_temp_probe_i2c(dev); } void diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index e331b4faeb10..4b4f9aabde70 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -635,7 +635,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) get_tmds_slave(encoder)) return; - type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2); + type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2); if (type < 0) return; diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 0b5d012d7c28..c8dc8a376ad9 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = { int nv04_tv_identify(struct drm_device *dev, int i2c_index) { - return nouveau_i2c_identify(dev, "TV encoder", - nv04_tv_encoder_info, i2c_index); + return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info, + NULL, i2c_index); } From 5c4abd09bdefb41d0c80055aa9d98433624ce1f0 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 23 Sep 2010 20:36:42 +0200 Subject: [PATCH 292/476] drm/nouveau: Misc cleanup of the PM code. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_pm.c | 10 ++++------ drivers/gpu/drm/nouveau/nouveau_temp.c | 2 +- drivers/gpu/drm/nouveau/nv04_pm.c | 1 + 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 85a56dea0ef7..b1d3f4b26ebd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -227,8 +227,8 @@ nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, return strlen(buf); } -DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, - nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); +static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, + nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); static int nouveau_sysfs_init(struct drm_device *dev) @@ -283,8 +283,6 @@ nouveau_sysfs_fini(struct drm_device *dev) } } - - static ssize_t nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) { @@ -317,7 +315,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; - if (strict_strtoul(buf, 10, &value) == -EINVAL) + if (strict_strtol(buf, 10, &value) == -EINVAL) return count; temp->down_clock = value/1000; @@ -352,7 +350,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; long value; - if (strict_strtoul(buf, 10, &value) == -EINVAL) + if (strict_strtol(buf, 10, &value) == -EINVAL) return count; temp->critical = value/1000; diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index f9eda87d1773..16bbbf1eff63 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -27,7 +27,7 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" -void +static void nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) { struct drm_nouveau_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 15e4b9029df8..61ca92744f93 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -25,6 +25,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_hw.h" +#include "nouveau_pm.h" struct nv04_pm_state { struct pll_lims pll; From 56edd964e883f2746bad7268cf557ab9b1d232cd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Sep 2010 09:15:50 +1000 Subject: [PATCH 293/476] drm/nouveau: v3.0 pll limits tables have type<->register mapping too Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8fc2ba164ef2..03032528c8d6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -4727,7 +4727,7 @@ get_pll_register(struct drm_device *dev, enum pll_types type) else { u8 *plim = &bios->data[bios->pll_limit_tbl_ptr]; - if (plim[0] >= 0x40) { + if (plim[0] >= 0x30) { u8 *entry = plim + plim[1]; for (i = 0; i < plim[3]; i++, entry += plim[2]) { if (entry[0] == type) From 5b32165b044f7d2486e2815456b1b2894aaab4ee Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Sep 2010 09:17:02 +1000 Subject: [PATCH 294/476] drm/nv50: use pll type rather than register for CRTC PLL Just in case someone, somewhere, does something difficult. This also removes one path that was different between fermi and non-fermi. 
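In outline, the change in the diff below (a condensed sketch; every identifier is taken from the patch itself, nothing new is introduced): instead of computing the VPLL register by chipset and handing it to get_pll_limits(), the caller now names the PLL by type and the limits lookup reports the register back in pll.reg.

    /* before: the caller picked the register per chipset */
    reg = (dev_priv->chipset < NV_C0) ? NV50_PDISPLAY_CRTC_CLK_CTRL1(head)
                                      : 0x614140 + (head * 0x800);
    ret = get_pll_limits(dev, reg, &pll);

    /* after: the caller names the PLL, the limits table supplies pll.reg */
    ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
    if (ret)
        return ret;
    nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);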
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_crtc.c | 33 ++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index d819eb935a2f..1686f8291b6d 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -266,15 +266,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pll_lims pll; - uint32_t reg, reg1, reg2; + uint32_t reg1, reg2; int ret, N1, M1, N2, M2, P; - if (dev_priv->chipset < NV_C0) - reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); - else - reg = 0x614140 + (head * 0x800); - - ret = get_pll_limits(dev, reg, &pll); + ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll); if (ret) return ret; @@ -286,11 +281,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", pclk, ret, N1, M1, N2, M2, P); - reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; - reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; - nv_wr32(dev, reg, 0x10000611); - nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); - nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); + reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00; + reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00; + nv_wr32(dev, pll.reg + 0, 0x10000611); + nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1); + nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); } else if (dev_priv->chipset < NV_C0) { ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); @@ -300,10 +295,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); - reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; - nv_wr32(dev, reg, 0x50000610); - nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); - nv_wr32(dev, reg + 8, N2); + reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000; + nv_wr32(dev, pll.reg + 0, 0x50000610); + nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); + nv_wr32(dev, pll.reg + 8, N2); } else { ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); if (ret <= 0) @@ -312,9 +307,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); - nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100); - nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1); - nv_wr32(dev, reg + 0x10, N2 << 16); + nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100); + nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1); + nv_wr32(dev, pll.reg + 0x10, N2 << 16); } return 0; From 428d2e828c0a68206e5158a42451487601dc9194 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 11:16:49 +0100 Subject: [PATCH 295/476] drm/i915/lvds: Probe DDC on creation Try to validate the panel's connection by writing to address 0xA0. 
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=18072 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2bcea8000859..e1f6e05169f6 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -810,6 +810,22 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev) return false; } +static bool intel_lvds_ddc_probe(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 buf = 0; + struct i2c_msg msgs[] = { + { + .addr = 0xA0, + .flags = 0, + .len = 1, + .buf = &buf, + }, + }; + struct i2c_adapter *i2c = &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter; + return i2c_transfer(i2c, msgs, 1) == 1; +} + /** * intel_lvds_init - setup LVDS connectors on this device * @dev: drm device @@ -849,6 +865,11 @@ void intel_lvds_init(struct drm_device *dev) gpio = PCH_GPIOC; } + if (!intel_lvds_ddc_probe(dev)) { + DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); + return; + } + intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); if (!intel_lvds) { return; From 3eee1794ac14f160338622ea57dcace7382ceb8f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 16:45:52 +0100 Subject: [PATCH 296/476] drm/i915: Remove idle timer debugging messages These have served their purpose and are now just noise in the debug stream. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 16541ee9e1e0..dda0f646bda3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4693,8 +4693,6 @@ static void intel_gpu_idle_timer(unsigned long arg) struct drm_device *dev = (struct drm_device *)arg; drm_i915_private_t *dev_priv = dev->dev_private; - DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); - dev_priv->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); @@ -4708,8 +4706,6 @@ static void intel_crtc_idle_timer(unsigned long arg) struct drm_crtc *crtc = &intel_crtc->base; drm_i915_private_t *dev_priv = crtc->dev->dev_private; - DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); - intel_crtc->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); From 780f0ca3e0cd3f0677d9149b7e14bf0878d1dbdc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 17:45:39 +0100 Subject: [PATCH 297/476] drm/i915/ringbuffer: Fix sign of ring space. As we presume space is signed when computing and looking for wrap along, make it so. Reported-by: Owain G. 
Ainsworth Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index bfbc4889909e..ce521010ce96 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -31,7 +31,7 @@ struct intel_ring_buffer { unsigned int head; unsigned int tail; - unsigned int space; + int space; struct intel_hw_status_page status_page; u32 irq_gem_seqno; /* last seq seem at irq time */ From d3849eded23e6c78b19acc1a3a7811a01d2f541d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 22:12:23 +0100 Subject: [PATCH 298/476] drm/i915: Remove unused dev_priv->panel_wants_dither This is now private to the DVO connector, remove it from the main device private. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/intel_lvds.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ac41ca1157a5..a5aa11fbb68a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -328,7 +328,6 @@ typedef struct drm_i915_private { /* LVDS info */ int backlight_level; /* restore backlight to this value */ - bool panel_wants_dither; struct drm_display_mode *panel_fixed_mode; struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e1f6e05169f6..1317731bc8ed 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -229,7 +229,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* Make sure pre-965s set dither correctly */ if (INTEL_INFO(dev)->gen < 4) { - if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) + if (dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; } From e6c3a2a6d358a726c2c52cb0132c9ad8f6f37486 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 23 Sep 2010 23:04:43 +0100 Subject: [PATCH 299/476] drm/i915: Use an uninterruptible wait for page-flips during modeset We need to drain the pending flips prior to disabling the pipe during modeset, and these need to be done in an uninterruptible fashion. 
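The heart of the fix, pulled out of the diff below as a short sketch (all names appear in the patch): the scanout object's pending-flip count is drained with a plain wait_event(), which a signal cannot interrupt, before the pipe is torn down.

    /* drain outstanding flips on the current scanout buffer; the
     * non-interruptible wait_event() guarantees the drain completes
     * even if the caller has a signal pending */
    obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
    wait_event(dev_priv->pending_flip_queue,
               atomic_read(&obj_priv->pending_flip) == 0);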
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 29 +++++++++++++++++++--------- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a5aa11fbb68a..6aa34317dcbf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1023,9 +1023,6 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, bool interruptible, struct intel_ring_buffer *ring); -int i915_gem_wait_for_pending_flip(struct drm_device *dev, - struct drm_gem_object **object_list, - int count); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 174e38abc9ef..dec7bbc81cb6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3506,7 +3506,7 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, return 0; } -int +static int i915_gem_wait_for_pending_flip(struct drm_device *dev, struct drm_gem_object **object_list, int count) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dda0f646bda3..6fbaa633d946 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1593,17 +1593,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } if (old_fb) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (atomic_read(&obj_priv->pending_flip)) { - ret = i915_gem_wait_for_pending_flip(dev, &obj, 1); - if (ret) { - i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - } + wait_event(dev_priv->pending_flip_queue, + atomic_read(&obj_priv->pending_flip) == 0); } ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); @@ -1954,6 +1949,20 @@ static void intel_clear_scanline_wait(struct drm_device *dev) } } +static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +{ + struct drm_i915_gem_object *obj_priv; + struct drm_i915_private *dev_priv; + + if (crtc->fb == NULL) + return; + + obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); + dev_priv = crtc->dev->dev_private; + wait_event(dev_priv->pending_flip_queue, + atomic_read(&obj_priv->pending_flip) == 0); +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -2130,6 +2139,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) if (!intel_crtc->active) return; + intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); @@ -2377,9 +2387,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) return; /* Give the overlay scaler a chance to disable if it's on this pipe */ + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); - drm_vblank_off(dev, pipe); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) From 270eea0fd71ae95654606ff7448f195fa22d12c5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Sep 2010 01:15:02 +0100 Subject: [PATCH 300/476] drm/i915/lvds: Use the GMBUS pin if specified in VBT Signed-off-by: Chris Wilson --- 
drivers/gpu/drm/i915/intel_lvds.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 1317731bc8ed..95e035a6009e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -771,7 +771,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev, * If it is not present, return false. * If no child dev is parsed from VBT, it assumes that the LVDS is present. */ -static bool lvds_is_present_in_vbt(struct drm_device *dev) +static bool lvds_is_present_in_vbt(struct drm_device *dev, + u8 *i2c_pin) { struct drm_i915_private *dev_priv = dev->dev_private; int i; @@ -790,6 +791,9 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev) child->device_type != DEVICE_TYPE_LFP) continue; + if (child->i2c_pin) + *i2c_pin = child->i2c_pin; + /* However, we cannot trust the BIOS writers to populate * the VBT correctly. Since LVDS requires additional * information from AIM blocks, a non-zero addin offset is @@ -810,7 +814,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev) return false; } -static bool intel_lvds_ddc_probe(struct drm_device *dev) +static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) { struct drm_i915_private *dev_priv = dev->dev_private; u8 buf = 0; @@ -822,7 +826,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev) .buf = &buf, }, }; - struct i2c_adapter *i2c = &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter; + struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter; return i2c_transfer(i2c, msgs, 1) == 1; } @@ -844,13 +848,15 @@ void intel_lvds_init(struct drm_device *dev) struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; u32 lvds; - int pipe, gpio = GPIOC; + int pipe; + u8 pin; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) return; - if (!lvds_is_present_in_vbt(dev)) { + pin = GMBUS_PORT_PANEL; + if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); return; } @@ -862,10 +868,9 @@ void intel_lvds_init(struct drm_device *dev) DRM_DEBUG_KMS("disable LVDS for eDP support\n"); return; } - gpio = PCH_GPIOC; } - if (!intel_lvds_ddc_probe(dev)) { + if (!intel_lvds_ddc_probe(dev, pin)) { DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); return; } @@ -930,7 +935,7 @@ void intel_lvds_init(struct drm_device *dev) * preferred mode is the right one. */ intel_lvds->edid = drm_get_edid(connector, - &dev_priv->gmbus[GMBUS_PORT_PANEL].adapter); + &dev_priv->gmbus[pin].adapter); if (!intel_lvds->edid) { /* Didn't get an EDID, so From 5ceb0f9bb7bde101d8b07cb803002591dcb8c804 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Sep 2010 10:24:28 +0100 Subject: [PATCH 301/476] drm/i915: Parse the eDP link configuration from the vBIOS First step, lets have a look at the values for troublesome panels and see if they may be used to improve our link training. 
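One packing detail worth spelling out (restated from the switch in the diff below, not new logic): the VBT stores a two-bit colour-depth code per panel, so the parser shifts by panel_type * 2 and masks with 3 before mapping the code to 18, 24 or 30 bpp.

    /* two-bit code per panel selects the colour depth */
    switch ((edp->color_depth >> (panel_type * 2)) & 3) {
    case EDP_18BPP: dev_priv->edp.bpp = 18; break;
    case EDP_24BPP: dev_priv->edp.bpp = 24; break;
    case EDP_30BPP: dev_priv->edp.bpp = 30; break;
    }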
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 13 +++++++-- drivers/gpu/drm/i915/intel_bios.c | 41 ++++++++++++++++++---------- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_dp.c | 4 +-- drivers/gpu/drm/i915/intel_lvds.c | 2 +- 5 files changed, 42 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6aa34317dcbf..cbfb99dce6aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -338,9 +338,18 @@ typedef struct drm_i915_private { unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; - unsigned int edp_support:1; int lvds_ssc_freq; - int edp_bpp; + + struct { + u8 rate:4; + u8 lanes:4; + u8 preemphasis:4; + u8 vswing:4; + + u8 initialized:1; + u8 support:1; + u8 bpp:6; + } edp; struct notifier_block lid_notifier; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 123e31d5a80b..42a7a5b33a0a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -401,14 +401,11 @@ parse_driver_features(struct drm_i915_private *dev_priv, if (!driver) return; - if (driver && SUPPORTS_EDP(dev) && - driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { - dev_priv->edp_support = 1; - } else { - dev_priv->edp_support = 0; - } + if (SUPPORTS_EDP(dev) && + driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp.support = 1; - if (driver && driver->dual_frequency) + if (driver->dual_frequency) dev_priv->render_reclock_avail = true; } @@ -417,28 +414,44 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; + dev_priv->edp.bpp = 18; + edp = find_section(bdb, BDB_EDP); if (!edp) { - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { DRM_DEBUG_KMS("No eDP BDB found but eDP panel " - "supported, assume 18bpp panel color " - "depth.\n"); - dev_priv->edp_bpp = 18; + "supported, assume %dbpp panel color " + "depth.\n", + dev_priv->edp.bpp); } return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: - dev_priv->edp_bpp = 18; + dev_priv->edp.bpp = 18; break; case EDP_24BPP: - dev_priv->edp_bpp = 24; + dev_priv->edp.bpp = 24; break; case EDP_30BPP: - dev_priv->edp_bpp = 30; + dev_priv->edp.bpp = 30; break; } + + dev_priv->edp.rate = edp->link_params[panel_type].rate; + dev_priv->edp.lanes = edp->link_params[panel_type].lanes; + dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis; + dev_priv->edp.vswing = edp->link_params[panel_type].vswing; + + DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n", + dev_priv->edp.bpp, + dev_priv->edp.rate, + dev_priv->edp.lanes, + dev_priv->edp.preemphasis, + dev_priv->edp.vswing); + + dev_priv->edp.initialized = true; } static void diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6fbaa633d946..4843d027aaad 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3705,7 +3705,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else temp |= PIPE_6BPC; } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { - switch (dev_priv->edp_bpp/3) { + switch (dev_priv->edp.bpp/3) { case 8: temp |= PIPE_8BPC; break; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ec26ee7ca992..117eb9988250 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ 
b/drivers/gpu/drm/i915/intel_dp.c @@ -139,7 +139,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi struct drm_i915_private *dev_priv = dev->dev_private; if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - return (pixel_clock * dev_priv->edp_bpp) / 8; + return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; } @@ -653,7 +653,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; if (IS_PCH_eDP(intel_dp)) - bpp = dev_priv->edp_bpp; + bpp = dev_priv->edp.bpp; break; } } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 95e035a6009e..98172bcf485f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -864,7 +864,7 @@ void intel_lvds_init(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return; - if (dev_priv->edp_support) { + if (dev_priv->edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); return; } From e61cb0d5fd172ab95a4501917526382f25158e83 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 24 Sep 2010 13:25:30 +0100 Subject: [PATCH 302/476] some clean up to intel-gtt.c In commit e517a5e97080bbe52857bd0d7df9b66602d53c4d the call to map_page_into_agp() got removed from intel_i830_setup_flush(), but the counterpart call from intel_i830_fini_flush() to unmap_page_from_agp() was left in place. Additionally, the page allocated here never gets its physical address used for sending to hardware, so there's no need to allocate it with GFP_DMA32. Nor is __GFP_ZERO really necessary, as the page is used only to store data to force flushing of some internal processor state. Signed-off-by: Jan Beulich Cc: Eric Anholt Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9a03815483c7..0c8ff6d8824b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -814,7 +814,6 @@ static void i830_cleanup(void) { kunmap(intel_private.i8xx_page); intel_private.i8xx_flush_page = NULL; - unmap_page_from_agp(intel_private.i8xx_page); __free_page(intel_private.i8xx_page); intel_private.i8xx_page = NULL; @@ -826,7 +825,7 @@ static void intel_i830_setup_flush(void) if (intel_private.i8xx_page) return; - intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + intel_private.i8xx_page = alloc_page(GFP_KERNEL); if (!intel_private.i8xx_page) return; From 27d64339a8d8465484286a2da93f5f6c36be5c3d Mon Sep 17 00:00:00 2001 From: Hette Visser Date: Fri, 24 Sep 2010 10:51:30 +0100 Subject: [PATCH 303/476] drm/i915/dp: Wait for PP_CONTROL to take effect. This patch fixes the black screen bug on Dell e6510, by adding two delays to give the eDP panel time to turn on before we continue with the next write. 300ms is rather arbitray and a rather long sleep, we need to find a way of refining this value. 
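The shape of the workaround, condensed from the diff below (everything named here comes from the patch): write the panel-power bit, sleep a fixed 300 ms for panels that are slow to react, and only then poll PP_ON with the existing 5 second timeout.

    pp |= POWER_TARGET_ON;
    I915_WRITE(PCH_PP_CONTROL, pp);

    /* fixed settle time for slow panels such as the Dell e6510;
     * admittedly arbitrary, a status-driven wait is the intended
     * refinement */
    msleep(300);

    if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000))
        DRM_ERROR("panel on wait timed out: 0x%08x\n",
                  I915_READ(PCH_PP_STATUS));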
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=29278 Signed-off-by: Chris Wilson Acked-by: Jesse Barnes --- drivers/gpu/drm/i915/intel_dp.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 117eb9988250..d19334aa66ad 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -784,6 +784,11 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) pp |= POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + /* Ouch. We need to wait here for some panels, like Dell e6510 + * https://bugs.freedesktop.org/show_bug.cgi?id=29278i + */ + msleep(300); + if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); @@ -819,6 +824,11 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + + /* Ouch. We need to wait here for some panels, like Dell e6510 + * https://bugs.freedesktop.org/show_bug.cgi?id=29278i + */ + msleep(300); } static void ironlake_edp_panel_vdd_on(struct drm_device *dev) From f787a5f59e1b0e320a6b0a37e9a2e306551d1e40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Sep 2010 16:02:42 +0100 Subject: [PATCH 304/476] drm/i915: Only hold a process-local lock whilst throttling. Avoid cause latencies in other clients by not taking the global struct mutex and moving the per-client request manipulation a local per-client mutex. For example, this allows a compositor to schedule a page-flip (through X) whilst an OpenGL application is monopolising the GPU. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +- drivers/gpu/drm/i915/i915_dma.c | 22 ++--- drivers/gpu/drm/i915/i915_drv.h | 15 ++- drivers/gpu/drm/i915/i915_gem.c | 120 ++++++++++++++---------- drivers/gpu/drm/i915/i915_irq.c | 29 +++--- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 16 ++-- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 +- 8 files changed, 123 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 36f0e3630f74..eb5dd52847a9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -261,7 +261,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -321,7 +321,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) atomic_read(&dev_priv->irq_received)); if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -932,7 +932,7 @@ i915_wedged_write(struct file *filp, atomic_set(&dev_priv->mm.wedged, val); if (val) { - DRM_WAKEUP(&dev_priv->irq_queue); + wake_up_all(&dev_priv->irq_queue); queue_work(dev_priv->wq, &dev_priv->error_work); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 048c54bdfd4c..a3aea17c964b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ 
b/drivers/gpu/drm/i915/i915_dma.c @@ -2162,20 +2162,19 @@ int i915_driver_unload(struct drm_device *dev) return 0; } -int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) +int i915_driver_open(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv; + struct drm_i915_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); - i915_file_priv = (struct drm_i915_file_private *) - kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); - - if (!i915_file_priv) + file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) return -ENOMEM; - file_priv->driver_priv = i915_file_priv; + file->driver_priv = file_priv; - INIT_LIST_HEAD(&i915_file_priv->mm.request_list); + INIT_LIST_HEAD(&file_priv->mm.request_list); + mutex_init(&file_priv->mutex); return 0; } @@ -2218,11 +2217,12 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } -void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + struct drm_i915_file_private *file_priv = file->driver_priv; - kfree(i915_file_priv); + mutex_destroy(&file_priv->mutex); + kfree(file_priv); } struct drm_ioctl_desc i915_ioctls[] = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cbfb99dce6aa..2611e85bdd3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -844,11 +844,13 @@ struct drm_i915_gem_request { /** global list entry for this request */ struct list_head list; + struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; }; struct drm_i915_file_private { + struct mutex mutex; struct { struct list_head request_list; } mm; @@ -1005,9 +1007,16 @@ void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); -uint32_t i915_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring); -bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); + +/** + * Returns true if seq1 is later than seq2. 
+ */ +static inline bool +i915_seqno_passed(uint32_t seq1, uint32_t seq2) +{ + return (int32_t)(seq1 - seq2) >= 0; +} + int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool interruptible); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dec7bbc81cb6..9185f098822d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1592,17 +1592,17 @@ i915_gem_process_flushing_list(struct drm_device *dev, uint32_t i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, + struct drm_file *file, struct drm_i915_gem_request *request, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *i915_file_priv = NULL; + struct drm_i915_file_private *file_priv = NULL; uint32_t seqno; int was_empty; - if (file_priv != NULL) - i915_file_priv = file_priv->driver_priv; + if (file != NULL) + file_priv = file->driver_priv; if (request == NULL) { request = kzalloc(sizeof(*request), GFP_KERNEL); @@ -1610,7 +1610,7 @@ i915_add_request(struct drm_device *dev, return 0; } - seqno = ring->add_request(dev, ring, file_priv, 0); + seqno = ring->add_request(dev, ring, 0); request->seqno = seqno; request->ring = ring; @@ -1618,11 +1618,12 @@ i915_add_request(struct drm_device *dev, was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); - if (i915_file_priv) { + if (file_priv) { + mutex_lock(&file_priv->mutex); + request->file_priv = file_priv; list_add_tail(&request->client_list, - &i915_file_priv->mm.request_list); - } else { - INIT_LIST_HEAD(&request->client_list); + &file_priv->mm.request_list); + mutex_unlock(&file_priv->mutex); } if (!dev_priv->mm.suspended) { @@ -1654,20 +1655,14 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) I915_GEM_DOMAIN_COMMAND, flush_domains); } -/** - * Returns true if seq1 is later than seq2. 
- */ -bool -i915_seqno_passed(uint32_t seq1, uint32_t seq2) +static inline void +i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) { - return (int32_t)(seq1 - seq2) >= 0; -} - -uint32_t -i915_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - return ring->get_gem_seqno(dev, ring); + if (request->file_priv) { + mutex_lock(&request->file_priv->mutex); + list_del(&request->client_list); + mutex_unlock(&request->file_priv->mutex); + } } static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, @@ -1681,7 +1676,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, list); list_del(&request->list); - list_del(&request->client_list); + i915_gem_request_remove_from_client(request); kfree(request); } @@ -1746,7 +1741,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, list_empty(&ring->request_list)) return; - seqno = i915_get_gem_seqno(dev, ring); + seqno = ring->get_seqno(dev, ring); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1760,7 +1755,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, trace_i915_gem_request_retire(dev, request->seqno); list_del(&request->list); - list_del(&request->client_list); + i915_gem_request_remove_from_client(request); kfree(request); } @@ -1862,7 +1857,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (atomic_read(&dev_priv->mm.wedged)) return -EIO; - if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { + if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else @@ -1881,12 +1876,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (interruptible) ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed( - ring->get_gem_seqno(dev, ring), seqno) + ring->get_seqno(dev, ring), seqno) || atomic_read(&dev_priv->mm.wedged)); else wait_event(ring->irq_queue, i915_seqno_passed( - ring->get_gem_seqno(dev, ring), seqno) + ring->get_seqno(dev, ring), seqno) || atomic_read(&dev_priv->mm.wedged)); ring->user_irq_put(dev, ring); @@ -1899,7 +1894,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", - __func__, ret, seqno, ring->get_gem_seqno(dev, ring), + __func__, ret, seqno, ring->get_seqno(dev, ring), dev_priv->next_seqno); /* Directly dispatch request retiring. While we have the work queue @@ -3384,28 +3379,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, * relatively low latency when blocking on a particular request to finish. 
*/ static int -i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; - int ret = 0; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - msecs_to_jiffies(20); + struct drm_i915_gem_request *request; + struct intel_ring_buffer *ring = NULL; + u32 seqno = 0; + int ret; - mutex_lock(&dev->struct_mutex); - while (!list_empty(&i915_file_priv->mm.request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&i915_file_priv->mm.request_list, - struct drm_i915_gem_request, - client_list); - + mutex_lock(&file_priv->mutex); + list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) break; - ret = i915_wait_request(dev, request->seqno, request->ring); - if (ret != 0) - break; + ring = request->ring; + seqno = request->seqno; } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&file_priv->mutex); + + if (seqno == 0) + return 0; + + ret = 0; + if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + /* And wait for the seqno passing without holding any locks and + * causing extra latency for others. This is safe as the irq + * generation is designed to be run atomically and so is + * lockless. + */ + ring->user_irq_get(dev, ring); + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(dev, ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + ring->user_irq_put(dev, ring); + + if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + ret = -EIO; + } + + if (ret == 0) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); return ret; } @@ -4857,17 +4872,26 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, return 0; } -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) +void i915_gem_release(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + struct drm_i915_file_private *file_priv = file->driver_priv; /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
*/ mutex_lock(&dev->struct_mutex); - while (!list_empty(&i915_file_priv->mm.request_list)) - list_del_init(i915_file_priv->mm.request_list.next); + mutex_lock(&file_priv->mutex); + while (!list_empty(&file_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); + list_del(&request->client_list); + request->file_priv = NULL; + } + mutex_unlock(&file_priv->mutex); mutex_unlock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d4c053e1c376..245a07e6f1a4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -327,16 +327,16 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) } if (gt_iir & GT_PIPE_NOTIFY) { - u32 seqno = render_ring->get_gem_seqno(dev, render_ring); + u32 seqno = render_ring->get_seqno(dev, render_ring); render_ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (gt_iir & bsd_usr_interrupt) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->bsd_ring.irq_queue); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -573,7 +573,8 @@ static void i915_capture_error_state(struct drm_device *dev) return; } - error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); + error->seqno = + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -873,7 +874,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) /* * Wakeup waiting processes so they don't hang */ - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); + if (HAS_BSD(dev)) + wake_up_all(&dev_priv->bsd_ring.irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -1012,18 +1015,17 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) { - u32 seqno = - render_ring->get_gem_seqno(dev, render_ring); + u32 seqno = render_ring->get_seqno(dev, render_ring); render_ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->bsd_ring.irq_queue); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1333,9 +1335,8 @@ void i915_hangcheck_elapsed(unsigned long data) /* If all work is done then ACTHD clearly hasn't advanced. */ if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(i915_get_gem_seqno(dev, - &dev_priv->render_ring), - i915_get_tail_request(dev)->seqno)) { + i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), + i915_get_tail_request(dev)->seqno)) { bool missed_wakeup = false; dev_priv->hangcheck_count = 0; @@ -1343,13 +1344,13 @@ void i915_hangcheck_elapsed(unsigned long data) /* Issue a wake-up to catch stuck h/w. 
*/ if (dev_priv->render_ring.waiting_gem_seqno && waitqueue_active(&dev_priv->render_ring.irq_queue)) { - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); missed_wakeup = true; } if (dev_priv->bsd_ring.waiting_gem_seqno && waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->bsd_ring.irq_queue); missed_wakeup = true; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4843d027aaad..00214c123ec2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4983,7 +4983,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, /* Initial scanout buffer will have a 0 pending flip count */ if ((atomic_read(&obj_priv->pending_flip) == 0) || atomic_dec_and_test(&obj_priv->pending_flip)) - DRM_WAKEUP(&dev_priv->pending_flip_queue); + wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1bcea7c85238..9b848be40572 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -239,7 +239,6 @@ do { \ static u32 render_ring_add_request(struct drm_device *dev, struct intel_ring_buffer *ring, - struct drm_file *file_priv, u32 flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -303,8 +302,8 @@ render_ring_add_request(struct drm_device *dev, } static u32 -render_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (HAS_PIPE_CONTROL(dev)) @@ -390,7 +389,6 @@ static int init_bsd_ring(struct drm_device *dev, static u32 bsd_ring_add_request(struct drm_device *dev, struct intel_ring_buffer *ring, - struct drm_file *file_priv, u32 flush_domains) { u32 seqno; @@ -432,8 +430,8 @@ bsd_ring_put_user_irq(struct drm_device *dev, } static u32 -bsd_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +bsd_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } @@ -773,7 +771,7 @@ static const struct intel_ring_buffer render_ring = { .get_active_head = render_ring_get_active_head, .flush = render_ring_flush, .add_request = render_ring_add_request, - .get_gem_seqno = render_ring_get_gem_seqno, + .get_seqno = render_ring_get_seqno, .user_irq_get = render_ring_get_user_irq, .user_irq_put = render_ring_put_user_irq, .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, @@ -792,7 +790,7 @@ static const struct intel_ring_buffer bsd_ring = { .get_active_head = bsd_ring_get_active_head, .flush = bsd_ring_flush, .add_request = bsd_ring_add_request, - .get_gem_seqno = bsd_ring_get_gem_seqno, + .get_seqno = bsd_ring_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, @@ -883,7 +881,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .get_active_head = gen6_bsd_ring_get_active_head, .flush = gen6_bsd_ring_flush, .add_request = bsd_ring_add_request, - .get_gem_seqno = bsd_ring_get_gem_seqno, + .get_seqno = bsd_ring_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, 
.dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ce521010ce96..d506da1605b4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -58,10 +58,9 @@ struct intel_ring_buffer { u32 flush_domains); u32 (*add_request)(struct drm_device *dev, struct intel_ring_buffer *ring, - struct drm_file *file_priv, u32 flush_domains); - u32 (*get_gem_seqno)(struct drm_device *dev, - struct intel_ring_buffer *ring); + u32 (*get_seqno)(struct drm_device *dev, + struct intel_ring_buffer *ring); int (*dispatch_gem_execbuffer)(struct drm_device *dev, struct intel_ring_buffer *ring, struct drm_i915_gem_execbuffer2 *exec, From 30dbf0c07ff4e3e21b827e2a9d6ff7eb34458819 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 25 Sep 2010 10:19:17 +0100 Subject: [PATCH 305/476] drm/i915: Adjust hangcheck EIO semantics Owain Ainsworth reported an issue between the interaction of the hangcheck and userspace immediately (and permanently) falling back to s/w rasterisation. In order to break the mutex and begin resetting the GPU, we must abort the current operation (usually within the wait) and climb sufficiently far back up the call chain to drop the mutex. In his implementation, Owain has a loop within the ioctl handler to detect the hang and then sleep until the error handler has run. I've chosen to return to userspace and report an EAGAIN which should trigger the userspace ioctl handler to repeat the call (simply because it felt less invasive...). Before hitting a wedged GPU, we then wait upon completion of the error handler. Reported-by: Owain G. Ainsworth Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem.c | 66 +++++++++++++++++++++++--- drivers/gpu/drm/i915/i915_gem_tiling.c | 6 ++- drivers/gpu/drm/i915/i915_irq.c | 2 + 4 files changed, 68 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2611e85bdd3d..f2ff258cdfd5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -363,6 +363,7 @@ typedef struct drm_i915_private { spinlock_t error_lock; struct drm_i915_error_state *first_error; struct work_struct error_work; + struct completion error_completion; struct workqueue_struct *wq; /* Display functions */ @@ -957,6 +958,7 @@ extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, struct mem_block *heap); /* i915_gem.c */ +int i915_gem_check_is_wedged(struct drm_device *dev); int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9185f098822d..a7283092c233 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -61,6 +61,37 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj); static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); +int +i915_gem_check_is_wedged(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct completion *x = &dev_priv->error_completion; + unsigned long flags; + int ret; + + if (!atomic_read(&dev_priv->mm.wedged)) + return 0; + + ret = wait_for_completion_interruptible(x); + if (ret) + return ret; + + /* Success, we reset the GPU! 
*/ + if (!atomic_read(&dev_priv->mm.wedged)) + return 0; + + /* GPU is hung, bump the completion count to account for + * the token we just consumed so that we never hit zero and + * end up waiting upon a subsequent completion event that + * will never happen. + */ + spin_lock_irqsave(&x->wait.lock, flags); + x->done++; + spin_unlock_irqrestore(&x->wait.lock, flags); + return -EIO; +} + + static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) { @@ -1848,15 +1879,15 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, BUG_ON(seqno == 0); + if (atomic_read(&dev_priv->mm.wedged)) + return -EAGAIN; + if (seqno == dev_priv->next_seqno) { seqno = i915_add_request(dev, NULL, NULL, ring); if (seqno == 0) return -ENOMEM; } - if (atomic_read(&dev_priv->mm.wedged)) - return -EIO; - if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); @@ -1890,7 +1921,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_end(dev, seqno); } if (atomic_read(&dev_priv->mm.wedged)) - ret = -EIO; + ret = -EAGAIN; if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", @@ -3569,13 +3600,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_relocation_entry *relocs = NULL; struct drm_i915_gem_request *request = NULL; - int ret = 0, ret2, i, pinned = 0; + int ret, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t reloc_index; int pin_tries, flips; struct intel_ring_buffer *ring = NULL; + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; + #if WATCH_EXEC DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); @@ -3639,7 +3674,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (atomic_read(&dev_priv->mm.wedged)) { mutex_unlock(&dev->struct_mutex); - ret = -EIO; + ret = -EAGAIN; goto pre_mutex_err; } @@ -4126,6 +4161,10 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; + mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file_priv, args->handle); @@ -4215,9 +4254,15 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret; + + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { @@ -4228,6 +4273,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); + if (atomic_read(&dev_priv->mm.wedged)) { + ret = -EAGAIN; + goto unlock; + } + /* Count all active objects as busy, even if they are currently not used * by the gpu. 
Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any @@ -4256,9 +4306,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, args->busy = obj_priv->active; } +unlock: drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4643,6 +4694,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); + init_completion(&dev_priv->error_completion); spin_lock(&shrink_list_lock); list_add(&dev_priv->mm.shrink_list, &shrink_list); spin_unlock(&shrink_list_lock); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b09b157f6ada..8c9ffc4768ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -273,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - int ret = 0; + int ret; + + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 245a07e6f1a4..aaa0f1b9d6e1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -406,6 +406,7 @@ static void i915_error_work_func(struct work_struct *work) atomic_set(&dev_priv->mm.wedged, 0); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } + complete_all(&dev_priv->error_completion); } } @@ -869,6 +870,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) i915_report_and_clear_eir(dev); if (wedged) { + INIT_COMPLETION(dev_priv->error_completion); atomic_set(&dev_priv->mm.wedged, 1); /* From 76c1dec1979d9b552aab9600eb898ccec394fbbc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 25 Sep 2010 11:22:51 +0100 Subject: [PATCH 306/476] drm/i915: Make the mutex_lock interruptible on ioctl paths ... and combine it with the wedged completion handler. 
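As a reading aid, a minimal userspace sketch of the locking pattern this patch introduces (assuming pthreads and a C11 atomic standing in for mm.wedged; pthreads has no signal-interruptible lock, so a plain lock stands in for mutex_lock_interruptible, and every name below is illustrative rather than the kernel's):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state; not the kernel's types. */
static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int wedged;	/* set by the error handler when the GPU hangs */

/*
 * The pattern the patch adds: refuse to enter the critical section once
 * the GPU is wedged, and re-check after taking the lock because the
 * error handler may have run while we slept on it.
 */
static int lock_interruptible_or_eagain(void)
{
	if (atomic_load(&wedged))
		return -EAGAIN;

	/* In the kernel this would be an interruptible lock acquisition. */
	pthread_mutex_lock(&struct_mutex);

	if (atomic_load(&wedged)) {
		pthread_mutex_unlock(&struct_mutex);
		return -EAGAIN;
	}
	return 0;
}

static int some_ioctl(void)
{
	int ret = lock_interruptible_or_eagain();
	if (ret)
		return ret;	/* caller is expected to retry on -EAGAIN */

	/* ... touch shared state under the lock ... */

	pthread_mutex_unlock(&struct_mutex);
	return 0;
}

int main(void)
{
	printf("first try: %d\n", some_ioctl());
	atomic_store(&wedged, 1);
	printf("after hang: %d\n", some_ioctl());
	return 0;
}

The second check matters because the error handler can mark the device wedged while the caller is still sleeping on the lock.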
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 126 +++++++++++++++++++++----------- 1 file changed, 84 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a7283092c233..ac5bff85a4c7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -91,6 +91,26 @@ i915_gem_check_is_wedged(struct drm_device *dev) return -EIO; } +static int i915_mutex_lock_interruptible(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (atomic_read(&dev_priv->mm.wedged)) { + mutex_unlock(&dev->struct_mutex); + return -EAGAIN; + } + + return 0; +} static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) @@ -299,7 +319,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; ret = i915_gem_object_get_pages(obj, 0); if (ret != 0) @@ -418,7 +440,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto fail_put_user_pages; ret = i915_gem_object_get_pages_or_evict(obj); if (ret) @@ -617,8 +641,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if (!access_ok(VERIFY_READ, user_data, remain)) return -EFAULT; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); ret = i915_gem_object_pin(obj, 0); if (ret) { mutex_unlock(&dev->struct_mutex); @@ -713,7 +739,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, goto out_unpin_pages; } - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto out_unpin_pages; + ret = i915_gem_object_pin(obj, 0); if (ret) goto out_unlock; @@ -787,7 +816,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; ret = i915_gem_object_get_pages(obj, 0); if (ret != 0) @@ -883,7 +914,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto fail_put_user_pages; ret = i915_gem_object_get_pages_or_evict(obj); if (ret) @@ -1051,7 +1084,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, return -ENOENT; obj_priv = to_intel_bo(obj); - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } intel_mark_busy(dev, obj); @@ -1106,11 +1143,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) { - mutex_unlock(&dev->struct_mutex); + if (obj == NULL) return -ENOENT; + + ret = 
i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; } #if WATCH_BUF @@ -1425,7 +1465,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOENT; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } obj_priv = to_intel_bo(obj); @@ -3668,16 +3712,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret != 0) goto pre_mutex_err; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto pre_mutex_err; i915_verify_inactive(dev, __FILE__, __LINE__); - if (atomic_read(&dev_priv->mm.wedged)) { - mutex_unlock(&dev->struct_mutex); - ret = -EAGAIN; - goto pre_mutex_err; - } - if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); ret = -EBUSY; @@ -4161,21 +4201,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; - ret = i915_gem_check_is_wedged(dev); - if (ret) - return ret; - - mutex_lock(&dev->struct_mutex); - obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", args->handle); - mutex_unlock(&dev->struct_mutex); return -ENOENT; } obj_priv = to_intel_bo(obj); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); drm_gem_object_unreference(obj); @@ -4220,18 +4259,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pin *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - - mutex_lock(&dev->struct_mutex); + int ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", args->handle); - mutex_unlock(&dev->struct_mutex); return -ENOENT; } obj_priv = to_intel_bo(obj); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + if (obj_priv->pin_filp != file_priv) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); @@ -4254,16 +4298,11 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret; - ret = i915_gem_check_is_wedged(dev); - if (ret) - return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", @@ -4271,11 +4310,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, return -ENOENT; } - mutex_lock(&dev->struct_mutex); - - if (atomic_read(&dev_priv->mm.wedged)) { - ret = -EAGAIN; - goto unlock; + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; } /* Count all active objects as busy, even if they are currently not used @@ -4306,10 +4344,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, args->busy = obj_priv->active; } -unlock: drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } int @@ -4326,6 +4363,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_madvise *args = data; struct 
drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret; switch (args->madv) { case I915_MADV_DONTNEED: @@ -4341,10 +4379,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, args->handle); return -ENOENT; } - - mutex_lock(&dev->struct_mutex); obj_priv = to_intel_bo(obj); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + if (obj_priv->pin_count) { drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); From 95375b7f9de429100b6e72df5c3abd9a3aaf266c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 20:54:39 +0200 Subject: [PATCH 307/476] drm/i915: kill now unnecessary gtt defines from i915_reg.h Everything is now handled in intel-gtt.h so these defines are only confusing. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 42 ++------------------------------- 1 file changed, 2 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 77c9191f3fd6..605db647e920 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -30,49 +30,11 @@ /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. + * This is all handled in the intel-gtt.ko module. i915.ko only + * cares about the vga bit for the vga rbiter. */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) -#define INTEL_GMCH_ENABLED 0x4 -#define INTEL_GMCH_MEM_MASK 0x1 -#define INTEL_GMCH_MEM_64M 0x1 -#define INTEL_GMCH_MEM_128M 0 - -#define INTEL_GMCH_GMS_MASK (0xf << 4) -#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) - -#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) -#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) -#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4) -#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4) -#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) -#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) -#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) -#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) - -#define SNB_GMCH_CTRL 0x50 -#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 -#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) -#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) -#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) -#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) -#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) -#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) -#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) -#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) -#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) -#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) -#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) -#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) -#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) -#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) -#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) -#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) /* PCI config space */ From bf7e0e1268f72ea1687140603a910eeaca031fa1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 21:08:29 +0200 Subject: [PATCH 308/476] drm/i915: fix ACTHD for gen <= 3 This was mixed up in the following patch: commit a6c45cf013a57e32ddae43dd4ac911eb4a3919fd Author: Chris Wilson Date: Fri Sep 17 00:32:17 2010 +0100 drm/i915: INTEL_INFO->gen supercedes i8xx, i9xx, i965g 
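A tiny standalone illustration of the bug class this one-liner fixes: the original condition tested gen for truth, and since gen is never zero it always selected the 965+ register, whereas the fix compares against a threshold. The register values below are placeholders for ACTHD/ACTHD_I965 and the program only demonstrates the selection logic:

#include <stdio.h>

/* Placeholder offsets loosely modelled on ACTHD / ACTHD_I965. */
#define ACTHD      0x020c8
#define ACTHD_I965 0x02074

static unsigned int acthd_reg_buggy(int gen)
{
	/* gen is never 0, so this always picks the gen4+ register. */
	return gen ? ACTHD_I965 : ACTHD;
}

static unsigned int acthd_reg_fixed(int gen)
{
	return gen >= 4 ? ACTHD_I965 : ACTHD;
}

int main(void)
{
	for (int gen = 2; gen <= 6; gen++)
		printf("gen%d: buggy=0x%05x fixed=0x%05x\n",
		       gen, acthd_reg_buggy(gen), acthd_reg_fixed(gen));
	return 0;
}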
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9b848be40572..ede436ba22d2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -131,7 +131,7 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = INTEL_INFO(dev)->gen ? ACTHD_I965 : ACTHD; + u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD; return I915_READ(acthd_reg); } From 3d281d8cca1acb2483444e0d1519c8ab6dda3a47 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 21:14:22 +0200 Subject: [PATCH 309/476] drm/i915: kill per-ring macros Two macros that use a base address for HWS_PGA were missing, add them. Also switch the remaining users of *_ACTHD to the ring-base one. Kill the other ring-specific macros because they're now unused. Signed-off-by: Daniel Vetter [ickle: And silence checkpatch whilst in the vicinity] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 35 +++++-------------------- drivers/gpu/drm/i915/intel_ringbuffer.c | 25 ++++++++++-------- 2 files changed, 21 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 605db647e920..ddbcd8c109e0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -253,11 +253,13 @@ #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 -#define RING_TAIL(base) (base)+0x30 -#define RING_HEAD(base) (base)+0x34 -#define RING_START(base) (base)+0x38 -#define RING_CTL(base) (base)+0x3c -#define RING_HWS_PGA(base) (base)+0x80 +#define RING_TAIL(base) ((base)+0x30) +#define RING_HEAD(base) ((base)+0x34) +#define RING_START(base) ((base)+0x38) +#define RING_CTL(base) ((base)+0x3c) +#define RING_HWS_PGA(base) ((base)+0x80) +#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) +#define RING_ACTHD(base) ((base)+0x74) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -283,7 +285,6 @@ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 -#define HWS_PGA_GEN6 0x04080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA 0x2088 /* 965GM+ only */ @@ -441,28 +442,6 @@ #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) #define GEN6_BLITTER_SYNC_STATUS (1 << 24) #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) -/* - * BSD (bit stream decoder instruction and interrupt control register defines - * (G4X and Ironlake only) - */ - -#define BSD_RING_TAIL 0x04030 -#define BSD_RING_HEAD 0x04034 -#define BSD_RING_START 0x04038 -#define BSD_RING_CTL 0x0403c -#define BSD_RING_ACTHD 0x04074 -#define BSD_HWS_PGA 0x04080 - -/* - * video command stream instruction and interrupt control register defines - * for GEN6 - */ -#define GEN6_BSD_RING_TAIL 0x12030 -#define GEN6_BSD_RING_HEAD 0x12034 -#define GEN6_BSD_RING_START 0x12038 -#define GEN6_BSD_RING_CTL 0x1203c -#define GEN6_BSD_RING_ACTHD 0x12074 -#define GEN6_BSD_HWS_PGA 0x14080 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 
ede436ba22d2..487575f2340d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -131,7 +131,8 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD; + u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? + RING_ACTHD(ring->mmio_base) : ACTHD; return I915_READ(acthd_reg); } @@ -352,11 +353,13 @@ static void render_setup_status_page(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; if (IS_GEN6(dev)) { - I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); - I915_READ(HWS_PGA_GEN6); /* posting read */ + I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), + ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ } else { - I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - I915_READ(HWS_PGA); /* posting read */ + I915_WRITE(RING_HWS_PGA(ring->mmio_base), + ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */ } } @@ -377,7 +380,7 @@ static unsigned int bsd_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(BSD_RING_ACTHD); + return I915_READ(RING_ACTHD(ring->mmio_base)); } static int init_bsd_ring(struct drm_device *dev, @@ -412,8 +415,8 @@ static void bsd_setup_status_page(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); - I915_READ(BSD_HWS_PGA); + I915_WRITE(RING_HWS_PGA(ring->mmio_base), ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA(ring->mmio_base)); } static void @@ -801,8 +804,8 @@ static void gen6_bsd_setup_status_page(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr); - I915_READ(GEN6_BSD_HWS_PGA); + I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); } static void gen6_bsd_ring_set_tail(struct drm_device *dev, @@ -832,7 +835,7 @@ static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(GEN6_BSD_RING_ACTHD); + return I915_READ(RING_ACTHD(ring->mmio_base)); } static void gen6_bsd_ring_flush(struct drm_device *dev, From 79f321b7e676bd54f563c5ce513588aa90b2cc21 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 21:20:10 +0200 Subject: [PATCH 310/476] drm/i915: kill ring->get_active_head All functions are extremely similar, so fold them into one generic implementation. This function isn't used anyway, because there's not yet a bsd ring error state dumper. 
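A compact sketch of the refactor pattern used here: several near-identical per-ring callbacks become one helper that derives the register from the ring's mmio_base (the pre-gen4 ACTHD special case is left out for brevity). The toy MMIO read and the values it returns are made up; only the RING_ACTHD(base) shape follows the patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ACTHD(base) ((base) + 0x74)

struct ring {
	const char *name;
	uint32_t mmio_base;
};

static uint32_t mmio_read(uint32_t reg)
{
	return 0xdead0000u | reg;	/* stand-in for a real register read */
}

/* One generic helper instead of one near-identical callback per ring. */
static uint32_t ring_get_active_head(const struct ring *ring)
{
	return mmio_read(RING_ACTHD(ring->mmio_base));
}

int main(void)
{
	struct ring rings[] = {
		{ "render", 0x02000 },
		{ "bsd",    0x04000 },
		{ "bsd6",   0x12000 },
	};

	for (size_t i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
		printf("%s ACTHD=0x%08x\n", rings[i].name,
		       (unsigned int)ring_get_active_head(&rings[i]));
	return 0;
}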
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 21 ++------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 +++-- 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 487575f2340d..a3e73d4cd391 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -127,8 +127,8 @@ static void ring_set_tail(struct drm_device *dev, I915_WRITE_TAIL(ring, ring->tail); } -static unsigned int render_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +u32 intel_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? @@ -376,13 +376,6 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_advance(dev, ring); } -static unsigned int bsd_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(RING_ACTHD(ring->mmio_base)); -} - static int init_bsd_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -771,7 +764,6 @@ static const struct intel_ring_buffer render_ring = { .setup_status_page = render_setup_status_page, .init = init_render_ring, .set_tail = ring_set_tail, - .get_active_head = render_ring_get_active_head, .flush = render_ring_flush, .add_request = render_ring_add_request, .get_seqno = render_ring_get_seqno, @@ -790,7 +782,6 @@ static const struct intel_ring_buffer bsd_ring = { .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, .set_tail = ring_set_tail, - .get_active_head = bsd_ring_get_active_head, .flush = bsd_ring_flush, .add_request = bsd_ring_add_request, .get_seqno = bsd_ring_get_seqno, @@ -831,13 +822,6 @@ static void gen6_bsd_ring_set_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(RING_ACTHD(ring->mmio_base)); -} - static void gen6_bsd_ring_flush(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, @@ -881,7 +865,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, .set_tail = gen6_bsd_ring_set_tail, - .get_active_head = gen6_bsd_ring_get_active_head, .flush = gen6_bsd_ring_flush, .add_request = bsd_ring_add_request, .get_seqno = bsd_ring_get_seqno, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index d506da1605b4..43c5f7a476fa 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -50,8 +50,6 @@ struct intel_ring_buffer { void (*set_tail)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 value); - unsigned int (*get_active_head)(struct drm_device *dev, - struct intel_ring_buffer *ring); void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, @@ -128,4 +126,7 @@ u32 intel_ring_get_seqno(struct drm_device *dev, int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); +u32 intel_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring); + #endif /* _INTEL_RINGBUFFER_H_ */ From 
447da18742b170b8e09ac71edf63c5798d2dbb0b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 21:49:27 +0200 Subject: [PATCH 311/476] drm/i915: kill ring->setup_status_page It's the same code, essentially, so kill all copies safe one unified version. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 25 +++---------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++-- 3 files changed, 6 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a3aea17c964b..ba050ed8df51 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -221,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev) DRM_DEBUG_DRIVER("hw status page @ %p\n", ring->status_page.page_addr); if (ring->status_page.gfx_addr != 0) - ring->setup_status_page(dev, ring); + intel_ring_setup_status_page(dev, ring); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a3e73d4cd391..5103b95cea93 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -348,8 +348,8 @@ render_ring_put_user_irq(struct drm_device *dev, spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -static void render_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; if (IS_GEN6(dev)) { @@ -404,14 +404,6 @@ bsd_ring_add_request(struct drm_device *dev, return seqno; } -static void bsd_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(RING_HWS_PGA(ring->mmio_base), ring->status_page.gfx_addr); - I915_READ(RING_HWS_PGA(ring->mmio_base)); -} - static void bsd_ring_get_user_irq(struct drm_device *dev, struct intel_ring_buffer *ring) @@ -564,7 +556,7 @@ static int init_status_page(struct drm_device *dev, ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - ring->setup_status_page(dev, ring); + intel_ring_setup_status_page(dev, ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -761,7 +753,6 @@ static const struct intel_ring_buffer render_ring = { .id = RING_RENDER, .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, - .setup_status_page = render_setup_status_page, .init = init_render_ring, .set_tail = ring_set_tail, .flush = render_ring_flush, @@ -779,7 +770,6 @@ static const struct intel_ring_buffer bsd_ring = { .id = RING_BSD, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, .set_tail = ring_set_tail, .flush = bsd_ring_flush, @@ -791,14 +781,6 @@ static const struct intel_ring_buffer bsd_ring = { }; -static void gen6_bsd_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), ring->status_page.gfx_addr); - I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); -} - static void gen6_bsd_ring_set_tail(struct drm_device *dev, struct intel_ring_buffer *ring, u32 value) @@ -862,7 +844,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .id = RING_BSD, .mmio_base = GEN6_BSD_RING_BASE, .size = 
32 * PAGE_SIZE, - .setup_status_page = gen6_bsd_setup_status_page, .init = init_bsd_ring, .set_tail = gen6_bsd_ring_set_tail, .flush = gen6_bsd_ring_flush, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 43c5f7a476fa..c50919275c6f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -41,8 +41,6 @@ struct intel_ring_buffer { struct intel_ring_buffer *ring); void (*user_irq_put)(struct drm_device *dev, struct intel_ring_buffer *ring); - void (*setup_status_page)(struct drm_device *dev, - struct intel_ring_buffer *ring); int (*init)(struct drm_device *dev, struct intel_ring_buffer *ring); @@ -128,5 +126,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring); +void intel_ring_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring); #endif /* _INTEL_RINGBUFFER_H_ */ From 45ff46c54a31bf8924b61e3e3411654410a3b5c3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 26 Sep 2010 00:24:48 +0200 Subject: [PATCH 312/476] drm: readd drm_lock_free in drm_unlock I've accidently killed a little bit too much in commit 1da3f87ebb7edb3e0b829ec4bbe5fb3d9d93986f Author: Daniel Vetter Date: Mon Aug 23 22:53:24 2010 +0200 drm: kill kernel_context_switch callbacks Note to self: Next time also test with AIGLX disabled. Reported-and-Tested-by: Andy Furniss Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30374 Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_lock.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 1e28b9072068..632ae243ede0 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -152,6 +152,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_lock *lock = data; + struct drm_master *master = file_priv->master; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -161,6 +162,10 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); + if (drm_lock_free(&master->lock, lock->context)) { + /* FIXME: Should really bail out here. */ + } + unblock_all_signals(); return 0; } From 1c25595f8d31392b8c36b54c624d01591dbfb87b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 11:03:27 +0100 Subject: [PATCH 313/476] drm/i915: Convert the file mutex into a spinlock Daniel Vetter pointed out that in this case is would be clearer and cleaner to use a spinlock instead of a mutex to protect the per-file request list manipulation. Make it so. 
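A minimal userspace analogue of the change, assuming pthreads: the critical sections only splice a per-file request list, so a spinlock (short, non-sleeping hold times) replaces the mutex. Structure and field names are loose stand-ins for drm_i915_file_private, not the driver's real types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	unsigned int seqno;
	struct request *next;
};

struct file_private {
	pthread_spinlock_t lock;	/* protects only request_list */
	struct request *request_list;
};

static void add_request(struct file_private *fp, unsigned int seqno)
{
	struct request *req = malloc(sizeof(*req));
	if (!req)
		return;
	req->seqno = seqno;

	pthread_spin_lock(&fp->lock);	/* short, non-sleeping section */
	req->next = fp->request_list;
	fp->request_list = req;
	pthread_spin_unlock(&fp->lock);
}

static void release_requests(struct file_private *fp)
{
	pthread_spin_lock(&fp->lock);
	while (fp->request_list) {
		struct request *req = fp->request_list;
		fp->request_list = req->next;
		free(req);
	}
	pthread_spin_unlock(&fp->lock);
}

int main(void)
{
	struct file_private fp = { .request_list = NULL };

	pthread_spin_init(&fp.lock, PTHREAD_PROCESS_PRIVATE);
	add_request(&fp, 1);
	add_request(&fp, 2);
	release_requests(&fp);
	pthread_spin_destroy(&fp.lock);
	printf("done\n");
	return 0;
}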
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 28 +++++++++++++++------------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ba050ed8df51..b752c31fbcff 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2173,8 +2173,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = file_priv; + spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); - mutex_init(&file_priv->mutex); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f2ff258cdfd5..710d59ea479c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -851,8 +851,8 @@ struct drm_i915_gem_request { }; struct drm_i915_file_private { - struct mutex mutex; struct { + struct spinlock lock; struct list_head request_list; } mm; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ac5bff85a4c7..78282edc02ca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1694,11 +1694,11 @@ i915_add_request(struct drm_device *dev, list_add_tail(&request->list, &ring->request_list); if (file_priv) { - mutex_lock(&file_priv->mutex); + spin_lock(&file_priv->mm.lock); request->file_priv = file_priv; list_add_tail(&request->client_list, &file_priv->mm.request_list); - mutex_unlock(&file_priv->mutex); + spin_unlock(&file_priv->mm.lock); } if (!dev_priv->mm.suspended) { @@ -1733,11 +1733,15 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) static inline void i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) { - if (request->file_priv) { - mutex_lock(&request->file_priv->mutex); - list_del(&request->client_list); - mutex_unlock(&request->file_priv->mutex); - } + struct drm_i915_file_private *file_priv = request->file_priv; + + if (!file_priv) + return; + + spin_lock(&file_priv->mm.lock); + list_del(&request->client_list); + request->file_priv = NULL; + spin_unlock(&file_priv->mm.lock); } static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, @@ -3464,7 +3468,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) u32 seqno = 0; int ret; - mutex_lock(&file_priv->mutex); + spin_lock(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) break; @@ -3472,7 +3476,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) ring = request->ring; seqno = request->seqno; } - mutex_unlock(&file_priv->mutex); + spin_unlock(&file_priv->mm.lock); if (seqno == 0) return 0; @@ -4974,8 +4978,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
*/ - mutex_lock(&dev->struct_mutex); - mutex_lock(&file_priv->mutex); + spin_lock(&file_priv->mm.lock); while (!list_empty(&file_priv->mm.request_list)) { struct drm_i915_gem_request *request; @@ -4985,8 +4988,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) list_del(&request->client_list); request->file_priv = NULL; } - mutex_unlock(&file_priv->mutex); - mutex_unlock(&dev->struct_mutex); + spin_unlock(&file_priv->mm.lock); } static int From 53b2087d218c100657bddcb8ae887fa07862fb81 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 22:21:10 +0100 Subject: [PATCH 314/476] drm/i915: fix debugging compilation error from previous commit There is no equivalent to mutex_destroy() for spinlocks so just delete the code. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b752c31fbcff..df1c3533a6c8 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2221,7 +2221,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - mutex_destroy(&file_priv->mutex); kfree(file_priv); } From ced270fa893735363f74bf96e0a8a05ec330d04d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 22:47:46 +0100 Subject: [PATCH 315/476] drm/i915: Ensure that the mode change flushing is currently uninterruptible Introduced by 48b956c5, I had thought I had already fixed this. Oh well. Reported-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 78282edc02ca..1025508e5916 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2858,10 +2858,17 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined); + ret = i915_gem_object_flush_gpu_write_domain(obj, true); if (ret) return ret; + /* Currently, we are always called from an non-interruptible context. */ + if (!pipelined) { + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + } + i915_gem_object_flush_cpu_write_domain(obj); old_read_domains = obj->read_domains; From a56ba56c275b1c2b982c8901ab92bf5a0fd0b757 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 10:07:56 +0100 Subject: [PATCH 316/476] Revert "drm/i915: Drop ring->lazy_request" With multiple rings generating requests independently, the outstanding requests must also be track independently. 
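A small standalone model of the per-ring lazy-request bookkeeping this revert brings back, intended only to show the control flow: going active merely promises a request by recording the shared next seqno and setting the ring's outstanding flag, and a waiter first emits any promised request on that ring so the seqno it waits on can actually signal. Names and the printf stand-in are illustrative, not the driver's code:

#include <stdbool.h>
#include <stdio.h>

static unsigned int next_seqno = 1;	/* shared across rings */

struct ring {
	const char *name;
	bool outstanding_lazy_request;	/* tracked per ring */
	unsigned int last_emitted;
};

static unsigned int next_request_seqno(struct ring *ring)
{
	ring->outstanding_lazy_request = true;	/* promise a request later */
	return next_seqno;
}

static unsigned int add_request(struct ring *ring)
{
	unsigned int seqno = next_seqno++;	/* actually emit it now */

	ring->outstanding_lazy_request = false;
	ring->last_emitted = seqno;
	return seqno;
}

static void wait_request(struct ring *ring, unsigned int seqno)
{
	/* A waiter must first turn any promised-but-unemitted request into
	 * a real one, otherwise the seqno it waits on would never signal. */
	if (ring->outstanding_lazy_request)
		seqno = add_request(ring);

	printf("%s: waiting for seqno %u (last emitted %u)\n",
	       ring->name, seqno, ring->last_emitted);
}

int main(void)
{
	struct ring render = { "render" }, bsd = { "bsd" };

	unsigned int s = next_request_seqno(&render);	/* object goes active */
	wait_request(&render, s);			/* flushes the lazy request */
	wait_request(&bsd, next_request_seqno(&bsd));	/* independent per ring */
	return 0;
}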
Reported-by: Wang Jinjin Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30380 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 43 +++++++++++++++++-------- drivers/gpu/drm/i915/intel_ringbuffer.h | 5 +++ 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1025508e5916..63b38608c800 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1545,12 +1545,23 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) obj_priv->pages = NULL; } +static uint32_t +i915_gem_next_request_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + ring->outstanding_lazy_request = true; + return dev_priv->next_seqno; +} + static void i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = obj->dev->dev_private; + struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); obj_priv->ring = ring; @@ -1563,7 +1574,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, /* Move from whatever list we were on to the tail of execution. */ list_move_tail(&obj_priv->list, &ring->active_list); - obj_priv->last_rendering_seqno = dev_priv->next_seqno; + obj_priv->last_rendering_seqno = seqno; } static void @@ -1686,6 +1697,7 @@ i915_add_request(struct drm_device *dev, } seqno = ring->add_request(dev, ring, 0); + ring->outstanding_lazy_request = false; request->seqno = seqno; request->ring = ring; @@ -1930,11 +1942,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (atomic_read(&dev_priv->mm.wedged)) return -EAGAIN; - if (seqno == dev_priv->next_seqno) { + if (ring->outstanding_lazy_request) { seqno = i915_add_request(dev, NULL, NULL, ring); if (seqno == 0) return -ENOMEM; } + BUG_ON(seqno == dev_priv->next_seqno); if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { if (HAS_PCH_SPLIT(dev)) @@ -1993,7 +2006,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, */ static int i915_wait_request(struct drm_device *dev, uint32_t seqno, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { return i915_do_wait_request(dev, seqno, 1, ring); } @@ -2139,12 +2152,21 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } +static int i915_ring_idle(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + i915_gem_flush_ring(dev, NULL, ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + return i915_wait_request(dev, + i915_gem_next_request_seqno(dev, ring), + ring); +} + int i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - u32 seqno; int ret; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && @@ -2155,18 +2177,12 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
*/ - seqno = dev_priv->next_seqno; - i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring, - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - ret = i915_wait_request(dev, seqno, &dev_priv->render_ring); + ret = i915_ring_idle(dev, &dev_priv->render_ring); if (ret) return ret; if (HAS_BSD(dev)) { - seqno = dev_priv->next_seqno; - i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring, - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring); + ret = i915_ring_idle(dev, &dev_priv->bsd_ring); if (ret) return ret; } @@ -3938,6 +3954,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, DRM_INFO("%s: move to exec list %p\n", __func__, obj); #endif } + i915_add_request(dev, file_priv, request, ring); request = NULL; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index c50919275c6f..9725f783db20 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -81,6 +81,11 @@ struct intel_ring_buffer { */ struct list_head request_list; + /** + * Do we have some not yet emitted requests outstanding? + */ + bool outstanding_lazy_request; + wait_queue_head_t irq_queue; drm_local_map_t map; }; From e957d7720a2797b31231616014b68f4f6203145e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Sep 2010 12:52:03 +0100 Subject: [PATCH 317/476] drm/i915/sdvo: Fix GMBUSification Besides a couple of bugs when writing more than a single byte along the GMBUS, SDVO was completely failing whilst trying to use GMBUS, so use bit banging instead. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 10 +- drivers/gpu/drm/i915/intel_bios.c | 9 + drivers/gpu/drm/i915/intel_bios.h | 3 +- drivers/gpu/drm/i915/intel_i2c.c | 181 +++++++++++----- drivers/gpu/drm/i915/intel_sdvo.c | 336 ++++++++++++++++++------------ 5 files changed, 341 insertions(+), 198 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 710d59ea479c..0bb255331764 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -132,10 +132,12 @@ struct drm_i915_fence_reg { }; struct sdvo_device_mapping { + u8 initialized; u8 dvo_port; u8 slave_addr; u8 dvo_wiring; - u8 initialized; + u8 i2c_pin; + u8 i2c_speed; u8 ddc_pin; }; @@ -248,8 +250,8 @@ typedef struct drm_i915_private { struct intel_gmbus { struct i2c_adapter adapter; - struct i2c_adapter *force_bitbanging; - int pin; + struct i2c_adapter *force_bit; + u32 reg0; } *gmbus; struct pci_dev *bridge_dev; @@ -1104,6 +1106,8 @@ extern int i915_restore_state(struct drm_device *dev); /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); +extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); +extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); extern void intel_i2c_reset(struct drm_device *dev); /* intel_opregion.c */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 42a7a5b33a0a..7e868d228c7b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -369,7 +369,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; + p_mapping->i2c_pin = p_child->i2c_pin; + p_mapping->i2c_speed = p_child->i2c_speed; p_mapping->initialized = 1; + DRM_DEBUG_KMS("SDVO 
device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", + p_mapping->dvo_port, + p_mapping->slave_addr, + p_mapping->dvo_wiring, + p_mapping->ddc_pin, + p_mapping->i2c_pin, + p_mapping->i2c_speed); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 4c18514f6f80..e1a598f2a966 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -197,7 +197,8 @@ struct bdb_general_features { struct child_device_config { u16 handle; u16 device_type; - u8 device_id[10]; /* See DEVICE_TYPE_* above */ + u8 i2c_speed; + u8 rsvd[9]; u16 addin_offset; u8 dvo_port; /* See Device_PORT_* above */ u8 i2c_pin; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 6f4d128935ac..91920247d4ff 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -38,6 +38,12 @@ #define I2C_RISEFALL_TIME 20 +static inline struct intel_gmbus * +to_intel_gmbus(struct i2c_adapter *i2c) +{ + return container_of(i2c, struct intel_gmbus, adapter); +} + struct intel_gpio { struct i2c_adapter adapter; struct i2c_algo_bit_data algo; @@ -71,10 +77,27 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) I915_WRITE(DSPCLK_GATE_D, val); } +static u32 get_reserved(struct intel_gpio *gpio) +{ + struct drm_i915_private *dev_priv = gpio->dev_priv; + struct drm_device *dev = dev_priv->dev; + u32 reserved = 0; + + /* On most chips, these bits must be preserved in software. */ + if (!IS_I830(dev) && !IS_845G(dev)) + reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); + + return reserved; +} + static int get_clock(void *data) { struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); + I915_WRITE(gpio->reg, reserved); return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } @@ -82,6 +105,9 @@ static int get_data(void *data) { struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); + I915_WRITE(gpio->reg, reserved); return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } @@ -89,13 +115,8 @@ static void set_clock(void *data, int state_high) { struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; - struct drm_device *dev = dev_priv->dev; - u32 reserved = 0, clock_bits; - - /* On most chips, these bits must be preserved in software. */ - if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + u32 reserved = get_reserved(gpio); + u32 clock_bits; if (state_high) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; @@ -111,13 +132,8 @@ static void set_data(void *data, int state_high) { struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; - struct drm_device *dev = dev_priv->dev; - u32 reserved = 0, data_bits; - - /* On most chips, these bits must be preserved in software. 
*/ - if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + u32 reserved = get_reserved(gpio); + u32 data_bits; if (state_high) data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; @@ -155,7 +171,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) gpio->reg += PCH_GPIOA - GPIOA; gpio->dev_priv = dev_priv; - snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO %d", pin); + snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); gpio->adapter.owner = THIS_MODULE; gpio->adapter.algo_data = &gpio->algo; gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; @@ -170,16 +186,6 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) if (i2c_bit_add_bus(&gpio->adapter)) goto out_free; - intel_i2c_reset(dev_priv->dev); - - /* JJJ: raise SCL and SDA? */ - intel_i2c_quirk_set(dev_priv, true); - set_data(gpio, 1); - udelay(I2C_RISEFALL_TIME); - set_clock(gpio, 1); - udelay(I2C_RISEFALL_TIME); - intel_i2c_quirk_set(dev_priv, false); - return &gpio->adapter; out_free: @@ -188,17 +194,27 @@ out_free: } static int -quirk_i2c_transfer(struct drm_i915_private *dev_priv, - struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) +intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv, + struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) { + struct intel_gpio *gpio = container_of(adapter, + struct intel_gpio, + adapter); int ret; intel_i2c_reset(dev_priv->dev); intel_i2c_quirk_set(dev_priv, true); - ret = i2c_transfer(adapter, msgs, num); + set_data(gpio, 1); + set_clock(gpio, 1); + udelay(I2C_RISEFALL_TIME); + + ret = adapter->algo->master_xfer(adapter, msgs, num); + + set_data(gpio, 1); + set_clock(gpio, 1); intel_i2c_quirk_set(dev_priv, false); return ret; @@ -213,21 +229,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = adapter->algo_data; - int i, speed, reg_offset; + int i, reg_offset; - if (bus->force_bitbanging) - return quirk_i2c_transfer(dev_priv, bus->force_bitbanging, msgs, num); + if (bus->force_bit) + return intel_i2c_quirk_xfer(dev_priv, + bus->force_bit, msgs, num); reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; - speed = GMBUS_RATE_100KHZ; - if (INTEL_INFO(dev_priv->dev)->gen > 4 || IS_G4X(dev_priv->dev)) { - if (bus->pin == GMBUS_PORT_DPB) /* SDVO only? */ - speed = GMBUS_RATE_1MHZ; - else - speed = GMBUS_RATE_400KHZ; - } - I915_WRITE(GMBUS0 + reg_offset, speed | bus->pin); + I915_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { u16 len = msgs[i].len; @@ -239,6 +249,7 @@ gmbus_xfer(struct i2c_adapter *adapter, (len << GMBUS_BYTE_COUNT_SHIFT) | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); + POSTING_READ(GMBUS2+reg_offset); do { u32 val, loop = 0; @@ -254,20 +265,35 @@ gmbus_xfer(struct i2c_adapter *adapter, } while (--len && ++loop < 4); } while (len); } else { - u32 val = 0, loop = 0; - - BUG_ON(msgs[i].len > 4); + u32 val, loop; + val = loop = 0; do { - val |= *buf++ << (loop*8); - } while (--len && +loop < 4); + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); I915_WRITE(GMBUS3 + reg_offset, val); I915_WRITE(GMBUS1 + reg_offset, - (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT ) | + (i + 1 == num ? 
GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + POSTING_READ(GMBUS2+reg_offset); + + while (len) { + if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + POSTING_READ(GMBUS2+reg_offset); + } } if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) @@ -279,17 +305,25 @@ gmbus_xfer(struct i2c_adapter *adapter, return num; timeout: - DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d\n", bus->pin); + DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", + bus->reg0 & 0xff, bus->adapter.name); /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ - bus->force_bitbanging = intel_gpio_create(dev_priv, bus->pin); - if (!bus->force_bitbanging) + bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); + if (!bus->force_bit) return -ENOMEM; - return quirk_i2c_transfer(dev_priv, bus->force_bitbanging, msgs, num); + return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); } static u32 gmbus_func(struct i2c_adapter *adapter) { + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + + if (bus->force_bit) + bus->force_bit->algo->functionality(bus->force_bit); + return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | /* I2C_FUNC_10BIT_ADDR | */ I2C_FUNC_SMBUS_READ_BLOCK_DATA | @@ -307,15 +341,15 @@ static const struct i2c_algorithm gmbus_algorithm = { */ int intel_setup_gmbus(struct drm_device *dev) { - static const char *names[] = { + static const char *names[GMBUS_NUM_PORTS] = { "disabled", "ssc", "vga", "panel", "dpc", "dpb", - "dpd", "reserved" + "dpd", }; struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; @@ -343,7 +377,8 @@ int intel_setup_gmbus(struct drm_device *dev) if (ret) goto err; - bus->pin = i; + /* By default use a conservative clock rate */ + bus->reg0 = i | GMBUS_RATE_100KHZ; } intel_i2c_reset(dev_priv->dev); @@ -360,6 +395,38 @@ err: return ret; } +void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + /* speed: + * 0x0 = 100 KHz + * 0x1 = 50 KHz + * 0x2 = 400 KHz + * 0x3 = 1000 Khz + */ + bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); +} + +void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + if (force_bit) { + if (bus->force_bit == NULL) { + struct drm_i915_private *dev_priv = adapter->algo_data; + bus->force_bit = intel_gpio_create(dev_priv, + bus->reg0 & 0xff); + } + } else { + if (bus->force_bit) { + i2c_del_adapter(bus->force_bit); + kfree(bus->force_bit); + bus->force_bit = NULL; + } + } +} + void intel_teardown_gmbus(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -370,9 +437,9 @@ void intel_teardown_gmbus(struct drm_device *dev) for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; - if (bus->force_bitbanging) { - i2c_del_adapter(bus->force_bitbanging); - kfree(bus->force_bitbanging); + if (bus->force_bit) { + i2c_del_adapter(bus->force_bit); + kfree(bus->force_bit); } i2c_del_adapter(&bus->adapter); } diff --git 
a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 7cd2d9592d65..b684a405a05b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -68,6 +68,8 @@ struct intel_sdvo { struct i2c_adapter *i2c; u8 slave_addr; + struct i2c_adapter ddc; + /* Register for the SDVO device: SDVOB or SDVOC */ int sdvo_reg; @@ -247,49 +249,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { - u8 out_buf[2] = { addr, 0 }; - u8 buf[2]; struct i2c_msg msgs[] = { { - .addr = intel_sdvo->slave_addr >> 1, + .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, - .buf = out_buf, + .buf = &addr, }, { - .addr = intel_sdvo->slave_addr >> 1, + .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, - .buf = buf, + .buf = ch, } }; int ret; if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) - { - *ch = buf[0]; return true; - } DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } -static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch) -{ - u8 out_buf[2] = { addr, ch }; - struct i2c_msg msgs[] = { - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 2, - .buf = out_buf, - } - }; - - return i2c_transfer(intel_sdvo->i2c, msgs, 1) == 1; -} - #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { @@ -434,22 +416,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, DRM_LOG_KMS("\n"); } -static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, - const void *args, int args_len) -{ - int i; - - intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); - - for (i = 0; i < args_len; i++) { - if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i, - ((u8*)args)[i])) - return false; - } - - return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd); -} - static const char *cmd_status_names[] = { "Power on", "Success", @@ -460,6 +426,70 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; +static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) +{ + u8 buf[args_len*2 + 2], status; + struct i2c_msg msgs[args_len + 3]; + int i, ret; + + intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); + + for (i = 0; i < args_len; i++) { + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2 *i; + buf[2*i + 0] = SDVO_I2C_ARG_0 - i; + buf[2*i + 1] = ((u8*)args)[i]; + } + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2*i; + buf[2*i + 0] = SDVO_I2C_OPCODE; + buf[2*i + 1] = cmd; + + /* the following two are to read the response */ + status = SDVO_I2C_CMD_STATUS; + msgs[i+1].addr = intel_sdvo->slave_addr; + msgs[i+1].flags = 0; + msgs[i+1].len = 1; + msgs[i+1].buf = &status; + + msgs[i+2].addr = intel_sdvo->slave_addr; + msgs[i+2].flags = I2C_M_RD; + msgs[i+2].len = 1; + msgs[i+2].buf = &status; + + ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); + if (ret < 0) { + DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); + return false; + } + if (ret != i+3) { + /* failure in I2C transfer */ + DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); + return false; + } + + i = 3; + while (status == SDVO_CMD_STATUS_PENDING && i--) { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + return false; + } + 
if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("command returns response %s [%d]\n", + status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???", + status); + return false; + } + + return true; +} + static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { @@ -497,13 +527,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i])) goto log_fail; - DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); + DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); } - - for (; i < 8; i++) - DRM_LOG_KMS(" "); DRM_LOG_KMS("\n"); - return true; log_fail: @@ -521,75 +547,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) return 4; } -/** - * Try to read the response after issuie the DDC switch command. But it - * is noted that we must do the action of reading response and issuing DDC - * switch command in one I2C transaction. Otherwise when we try to start - * another I2C transaction after issuing the DDC bus switch, it will be - * switched to the internal SDVO register. - */ -static int intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, - u8 target) +static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, + u8 ddc_bus) { - u8 out_buf[2], cmd_buf[2], ret_value[2], ret; - struct i2c_msg msgs[] = { - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 2, - .buf = out_buf, - }, - /* the following two are to read the response */ - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 1, - .buf = cmd_buf, - }, - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = I2C_M_RD, - .len = 1, - .buf = ret_value, - }, - }; - - intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, - &target, 1); - /* write the DDC switch command argument */ - if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target)) - return -EIO; - - out_buf[0] = SDVO_I2C_OPCODE; - out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; - cmd_buf[0] = SDVO_I2C_CMD_STATUS; - cmd_buf[1] = 0; - ret_value[0] = 0; - ret_value[1] = 0; - - ret = i2c_transfer(intel_sdvo->i2c, msgs, 3); - if (ret < 0) - return ret; - if (ret != 3) { - /* failure in I2C transfer */ - DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); - return -EIO; - } - if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("DDC switch command returns response %d\n", - ret_value[0]); - return -EIO; - } - - return 0; + return intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_SET_CONTROL_BUS_SWITCH, + &ddc_bus, 1); } static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { - if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) - return false; - - return intel_sdvo_read_response(intel_sdvo, NULL, 0); + return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); } static bool @@ -1272,7 +1240,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { - return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_DEVICE_CAPS, + caps, sizeof(*caps))) + return false; + + DRM_DEBUG_KMS("SDVO capabilities:\n" + " vendor_id: %d\n" + " device_id: %d\n" + " device_rev_id: %d\n" + " sdvo_version_major: %d\n" + " sdvo_version_minor: %d\n" + " sdvo_inputs_mask: %d\n" + " smooth_scaling: %d\n" + " sharp_scaling: %d\n" + " up_scaling: %d\n" + " down_scaling: %d\n" + 
" stall_support: %d\n" + " output_flags: %d\n", + caps->vendor_id, + caps->device_id, + caps->device_rev_id, + caps->sdvo_version_major, + caps->sdvo_version_minor, + caps->sdvo_inputs_mask, + caps->smooth_scaling, + caps->sharp_scaling, + caps->up_scaling, + caps->down_scaling, + caps->stall_support, + caps->output_flags); + + return true; } /* No use! */ @@ -1377,16 +1376,10 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) } static struct edid * -intel_sdvo_get_edid(struct drm_connector *connector, int ddc) +intel_sdvo_get_edid(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); - int ret; - - ret = intel_sdvo_set_control_bus_switch(intel_sdvo, ddc); - if (ret) - return NULL; - - return drm_get_edid(connector, intel_sdvo->i2c); + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + return drm_get_edid(connector, &sdvo->ddc); } static struct drm_connector * @@ -1447,26 +1440,27 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) enum drm_connector_status status; struct edid *edid; - edid = intel_sdvo_get_edid(connector, intel_sdvo->ddc_bus); + edid = intel_sdvo_get_edid(connector); if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { - u8 ddc; + u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { - edid = intel_sdvo_get_edid(connector, ddc); - if (edid) { - /* - * If we found the EDID on the other bus, - * assume that is the correct DDC bus. - */ - intel_sdvo->ddc_bus = ddc; + intel_sdvo->ddc_bus = ddc; + edid = intel_sdvo_get_edid(connector); + if (edid) break; - } } + /* + * If we found the EDID on the other bus, + * assume that is the correct DDC bus. + */ + if (edid == NULL) + intel_sdvo->ddc_bus = saved_ddc; } /* @@ -1499,7 +1493,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret; if (!intel_sdvo_write_cmd(intel_sdvo, - SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) + SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; if (intel_sdvo->is_tv) { /* add 30ms delay when the output type is SDVO-TV */ @@ -1508,7 +1502,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; - DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); + DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", + response & 0xff, response >> 8, + intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; @@ -1541,11 +1537,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; /* set the bus switch and get the modes */ - edid = intel_sdvo_get_edid(connector, intel_sdvo->ddc_bus); + edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. 
On this device, the DVI-I connector shares one DDC @@ -1647,7 +1642,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) return; BUILD_BUG_ON(sizeof(tv_res) != 3); - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + if (!intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) @@ -1924,6 +1920,7 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); + i2c_del_adapter(&intel_sdvo->ddc); intel_encoder_destroy(encoder); } @@ -1991,6 +1988,30 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, intel_sdvo_guess_ddc_bus(sdvo); } +static void +intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, + struct intel_sdvo *sdvo, u32 reg) +{ + struct sdvo_device_mapping *mapping; + u8 pin, speed; + + if (IS_SDVOB(reg)) + mapping = &dev_priv->sdvo_mappings[0]; + else + mapping = &dev_priv->sdvo_mappings[1]; + + pin = GMBUS_PORT_DPB; + speed = GMBUS_RATE_1MHZ >> 8; + if (mapping->initialized) { + pin = mapping->i2c_pin; + speed = mapping->i2c_speed; + } + + sdvo->i2c = &dev_priv->gmbus[pin].adapter; + intel_gmbus_set_speed(sdvo->i2c, speed); + intel_gmbus_force_bit(sdvo->i2c, true); +} + static bool intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) { @@ -2504,7 +2525,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); else return true; +} +static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + + if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) + return -EIO; + + return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); +} + +static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + return sdvo->i2c->algo->functionality(sdvo->i2c); +} + +static const struct i2c_algorithm intel_sdvo_ddc_proxy = { + .master_xfer = intel_sdvo_ddc_proxy_xfer, + .functionality = intel_sdvo_ddc_proxy_func +}; + +static bool +intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, + struct drm_device *dev) +{ + sdvo->ddc.owner = THIS_MODULE; + sdvo->ddc.class = I2C_CLASS_DDC; + snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); + sdvo->ddc.dev.parent = &dev->pdev->dev; + sdvo->ddc.algo_data = sdvo; + sdvo->ddc.algo = &intel_sdvo_ddc_proxy; + + return i2c_add_adapter(&sdvo->ddc) == 0; } bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) @@ -2518,6 +2575,11 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (!intel_sdvo) return false; + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { + kfree(intel_sdvo); + return false; + } + intel_sdvo->sdvo_reg = sdvo_reg; intel_encoder = &intel_sdvo->base; @@ -2525,9 +2587,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* encoder type will be decided later */ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); - intel_sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; - - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; + intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { 
@@ -2589,6 +2650,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) err: drm_encoder_cleanup(&intel_encoder->base); + i2c_del_adapter(&intel_sdvo->ddc); kfree(intel_sdvo); return false; From cb8ea7527b813dd6e19fb07328f7867a5f0a8d0a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 13:35:47 +0100 Subject: [PATCH 318/476] drm/i915: Use i2c bit banging instead of GMBUS There are several reported instances of GMBUS failing to successfully read the EDID, so revert back to bit banging until the issue is resolved. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30371 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_i2c.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 91920247d4ff..2449a74d4d80 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -379,6 +379,9 @@ int intel_setup_gmbus(struct drm_device *dev) /* By default use a conservative clock rate */ bus->reg0 = i | GMBUS_RATE_100KHZ; + + /* XXX force bit banging until GMBUS is fully debugged */ + bus->force_bit = intel_gpio_create(dev_priv, i); } intel_i2c_reset(dev_priv->dev); From 8daf7473203cd9bd1b9b98200ee1c74dfe4826fe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 14:07:26 +0100 Subject: [PATCH 319/476] MAINTAINERS: Add contact details for drm/i915 Signed-off-by: Chris Wilson --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 50b8148448fd..5190fb9dff12 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2051,6 +2051,15 @@ S: Maintained F: drivers/gpu/drm/ F: include/drm/ +INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) +M: Chris Wilson +L: intel-gfx@lists.freedesktop.org +L: dri-devel@lists.freedesktop.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git +S: Supported +F: drivers/gpu/drm/i915 +F: include/drm/i915* + DSCC4 DRIVER M: Francois Romieu L: netdev@vger.kernel.org From b8232e906381dcba2bb26f0d849d4c25cc9b1368 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 16:41:32 +0100 Subject: [PATCH 320/476] drm/i915: Disable LVDS i2c probing when using GPIO bit banging This check only appears to succeed when using GMBUS, so we need to skip it if we have fallen back to using GPIO bit banging. 
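In short, the change amounts to a one-line helper plus an early return in the LVDS probe path; the sketch below is only a condensed view of the hunks that follow, not an additional hunk:

	/* i915_drv.h: has this GMBUS adapter been switched to bit banging? */
	extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
	{
		return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
	}

	/* intel_lvds.c, inside intel_lvds_ddc_probe(): the probe transfer
	 * only appears to succeed over GMBUS, so once we have fallen back
	 * to GPIO bit banging just report the panel as present. */
	if (intel_gmbus_is_forced_bit(i2c))
		return true;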
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/intel_lvds.c | 3 +++ 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0bb255331764..0ce76a82a675 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1108,6 +1108,10 @@ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); +extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) +{ + return container_of(adapter, struct intel_gmbus, adapter)->force_bit; +} extern void intel_i2c_reset(struct drm_device *dev); /* intel_opregion.c */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 98172bcf485f..f1a649990ea9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -827,6 +827,9 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) }, }; struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter; + /* XXX this only appears to work when using GMBUS */ + if (intel_gmbus_is_forced_bit(i2c)) + return true; return i2c_transfer(i2c, msgs, 1) == 1; } From a8ed0b16a924a59b56906e83d6c033a04a9818f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 23:33:17 +0100 Subject: [PATCH 321/476] drm/i915: Tidy dvo_ch7017 and print out which chip we detect Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/dvo_ch7017.c | 63 +++++++++++++------------------ 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 0bc8ce1ad9aa..af70337567ce 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -165,56 +165,35 @@ struct ch7017_priv { static void ch7017_dump_regs(struct intel_dvo_device *dvo); static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); -static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) +static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { - struct i2c_adapter *adapter = dvo->i2c_bus; - u8 out_buf[2]; - u8 in_buf[2]; - struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, - .buf = out_buf, + .buf = &addr, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, - .buf = in_buf, + .buf = val, } }; - - out_buf[0] = addr; - out_buf[1] = 0; - - if (i2c_transfer(adapter, msgs, 2) == 2) { - *val= in_buf[0]; - return true; - }; - - return false; + return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2; } -static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) +static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) { - struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; + uint8_t buf[2] = { addr, val }; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, - .buf = out_buf, + .buf = buf, }; - - out_buf[0] = addr; - out_buf[1] = val; - - if (i2c_transfer(adapter, &msg, 1) == 1) - return true; - - return false; + return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; } /** Probes for a CH7017 on the given bus and slave address. 
*/ @@ -222,7 +201,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { struct ch7017_priv *priv; - uint8_t val; + const char *str; + u8 val; priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); if (priv == NULL) @@ -234,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo, if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) goto fail; - if (val != CH7017_DEVICE_ID_VALUE && - val != CH7018_DEVICE_ID_VALUE && - val != CH7019_DEVICE_ID_VALUE) { + switch (val) { + case CH7017_DEVICE_ID_VALUE: + str = "ch7017"; + break; + case CH7018_DEVICE_ID_VALUE: + str = "ch7018"; + break; + case CH7019_DEVICE_ID_VALUE: + str = "ch7019"; + break; + default: DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " - "Slave %d.\n", - val, adapter->name,dvo->slave_addr); + "slave %d.\n", + val, adapter->name,dvo->slave_addr); goto fail; } + DRM_DEBUG_KMS("%s detected on %s, addr %d\n", + str, adapter->name, dvo->slave_addr); return true; + fail: kfree(priv); return false; @@ -365,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) } /* XXX: Should actually wait for update power status somehow */ - udelay(20000); + msleep(20); } static void ch7017_dump_regs(struct intel_dvo_device *dvo) From f573c66061184ce28196a22229b6214256ceacd8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Sep 2010 23:34:44 +0100 Subject: [PATCH 322/476] drm/i915/dvo: Fix panel and DDC i2c pins Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dvo.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 561fbc34cec8..ea373283c93b 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -81,7 +81,6 @@ struct intel_dvo { struct intel_encoder base; struct intel_dvo_device dev; - int ddc_bus; struct drm_display_mode *panel_fixed_mode; bool panel_wants_dither; @@ -245,7 +244,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) * that's not the case. 
*/ intel_ddc_get_modes(connector, - &dev_priv->gmbus[intel_dvo->ddc_bus].adapter); + &dev_priv->gmbus[GMBUS_PORT_DPC].adapter); if (!list_empty(&connector->probed_modes)) return 1; @@ -349,7 +348,6 @@ void intel_dvo_init(struct drm_device *dev) struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; - int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; @@ -367,9 +365,6 @@ void intel_dvo_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_dvo_enc_funcs, encoder_type); - /* Set up the DDC bus */ - intel_dvo->ddc_bus = GMBUS_PORT_DPB; - /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; @@ -384,7 +379,7 @@ void intel_dvo_init(struct drm_device *dev) if (dvo->gpio != 0) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) - gpio = GMBUS_PORT_PANEL; + gpio = GMBUS_PORT_SSC; else gpio = GMBUS_PORT_DPB; @@ -395,8 +390,7 @@ void intel_dvo_init(struct drm_device *dev) i2c = &dev_priv->gmbus[gpio].adapter; intel_dvo->dev = *dvo; - ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); - if (!ret) + if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) continue; intel_encoder->type = INTEL_OUTPUT_DVO; From 97d1ebaf81491afd8b45186056eda7ebf5da7875 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Sep 2010 11:36:22 +0100 Subject: [PATCH 323/476] drm/i915/debug: Remove defunct WATCH_LRU This has bitrotted through inuse and superseded by tracing and debugfs. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 16 ------------ drivers/gpu/drm/i915/i915_gem_debug.c | 35 --------------------------- drivers/gpu/drm/i915/i915_gem_evict.c | 3 --- 4 files changed, 56 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0ce76a82a675..703b8c92bdfa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -77,7 +77,6 @@ enum plane { #define WATCH_COHERENCY 0 #define WATCH_BUF 0 #define WATCH_EXEC 0 -#define WATCH_LRU 0 #define WATCH_RELOC 0 #define WATCH_INACTIVE 0 #define WATCH_PWRITE 0 @@ -1089,7 +1088,6 @@ void i915_verify_inactive(struct drm_device *dev, char *file, int line); void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); -void i915_dump_lru(struct drm_device *dev, const char *where); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 81513fa5d303..5ce14f188c43 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1865,12 +1865,6 @@ i915_gem_retire_requests_ring(struct drm_device *dev, break; obj = &obj_priv->base; - -#if WATCH_LRU - DRM_INFO("%s: retire %d moves to inactive list %p\n", - __func__, request->seqno, obj); -#endif - if (obj->write_domain != 0) i915_gem_object_move_to_flushing(obj); else @@ -2646,9 +2640,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
*/ -#if WATCH_LRU - DRM_INFO("%s: GTT full, evicting something\n", __func__); -#endif ret = i915_gem_evict_something(dev, obj->size, alignment); if (ret) return ret; @@ -3950,18 +3941,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, obj_priv = to_intel_bo(obj); i915_gem_object_move_to_active(obj, ring); -#if WATCH_LRU - DRM_INFO("%s: move to exec list %p\n", __func__, obj); -#endif } i915_add_request(dev, file_priv, request, ring); request = NULL; -#if WATCH_LRU - i915_dump_lru(dev, __func__); -#endif - i915_verify_inactive(dev, __FILE__, __LINE__); err: diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 80f380b1d951..26e67ee7f85d 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -97,41 +97,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, } #endif -#if WATCH_LRU -void -i915_dump_lru(struct drm_device *dev, const char *where) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - DRM_INFO("active list %s {\n", where); - spin_lock(&dev_priv->mm.active_list_lock); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - spin_unlock(&dev_priv->mm.active_list_lock); - DRM_INFO("}\n"); - DRM_INFO("flushing list %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); - DRM_INFO("inactive %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); -} -#endif - - #if WATCH_COHERENCY void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 63ac3d2ba52c..c503c81f4cdf 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -190,9 +190,6 @@ found: /* Unbinding will emit any required flushes */ list_for_each_entry_safe(obj_priv, tmp_obj_priv, &eviction_list, evict_list) { -#if WATCH_LRU - DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base); -#endif ret = i915_gem_object_unbind(&obj_priv->base); if (ret) return ret; From 3d2a812ae4676b74f2033cf09c855074d06f3872 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Sep 2010 11:39:53 +0100 Subject: [PATCH 324/476] drm/i915/debug: Remove default WATCH_BUF Replaced by tracepoints. 
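To see why the spew can simply go, take i915_gem_object_set_to_gpu_domain() from the hunks below: the compile-time block being removed sits directly above a tracepoint that already records the same domain transition. Trimmed for illustration only; the tracefs path mentioned in the comment is the usual way such events are enabled at runtime, not something introduced by this patch:

	/* Removed: only ever compiled in after rebuilding with WATCH_BUF = 1. */
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
	/*
	 * Kept: the tracepoint that follows in the same function,
	 * trace_i915_gem_object_change_domain(obj, old_read_domains, ...),
	 * reports the same transition and can be enabled at runtime
	 * (e.g. under events/i915/ in tracefs) without a rebuild.
	 */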
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 53 +-------------------------- drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- 3 files changed, 3 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 703b8c92bdfa..cb4e9a63c835 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -75,7 +75,6 @@ enum plane { #define DRIVER_PATCHLEVEL 0 #define WATCH_COHERENCY 0 -#define WATCH_BUF 0 #define WATCH_EXEC 0 #define WATCH_RELOC 0 #define WATCH_INACTIVE 0 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5ce14f188c43..9a8e6752e009 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1092,10 +1092,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, intel_mark_busy(dev, obj); -#if WATCH_BUF - DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", - obj, obj->size, read_domains, write_domain); -#endif if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -1137,7 +1133,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_sw_finish *args = data; struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; int ret = 0; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1153,14 +1148,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return ret; } -#if WATCH_BUF - DRM_INFO("%s: sw_finish %d (%p %zd)\n", - __func__, args->handle, obj, obj->size); -#endif - obj_priv = to_intel_bo(obj); - /* Pinned buffers may be scanout, so flush the cache */ - if (obj_priv->pin_count) + if (to_intel_bo(obj)->pin_count) i915_gem_object_flush_cpu_write_domain(obj); drm_gem_object_unreference(obj); @@ -2061,10 +2050,6 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, * it. */ if (obj_priv->active) { -#if WATCH_BUF - DRM_INFO("%s: object %p wait for seqno %08x\n", - __func__, obj, obj_priv->last_rendering_seqno); -#endif ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, interruptible, @@ -2086,10 +2071,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; -#if WATCH_BUF - DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); - DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); -#endif if (obj_priv->gtt_space == NULL) return 0; @@ -2647,10 +2628,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) goto search_free; } -#if WATCH_BUF - DRM_INFO("Binding object of size %zd at 0x%08x\n", - obj->size, obj_priv->gtt_offset); -#endif ret = i915_gem_object_get_pages(obj, gfpmask); if (ret) { drm_mm_put_block(obj_priv->gtt_space); @@ -3073,12 +3050,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) intel_mark_busy(dev, obj); -#if WATCH_BUF - DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", - __func__, obj, - obj->read_domains, obj->pending_read_domains, - obj->write_domain, obj->pending_write_domain); -#endif /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains @@ -3105,13 +3076,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) * stale data. That is, any new read domains. 
*/ invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { -#if WATCH_BUF - DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", - __func__, flush_domains, invalidate_domains); -#endif + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); - } old_read_domains = obj->read_domains; @@ -3129,12 +3095,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) dev->flush_domains |= flush_domains; if (obj_priv->ring) dev_priv->mm.flush_rings |= obj_priv->ring->id; -#if WATCH_BUF - DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", - __func__, - obj->read_domains, obj->write_domain, - dev->invalidate_domains, dev->flush_domains); -#endif trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -3438,11 +3398,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (reloc_offset & (PAGE_SIZE - 1))); reloc_val = target_obj_priv->gtt_offset + reloc->delta; -#if WATCH_BUF - DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", - obj, (unsigned int) reloc->offset, - readl(reloc_entry), reloc_val); -#endif writel(reloc_val, reloc_entry); io_mapping_unmap_atomic(reloc_page, KM_USER0); @@ -3454,10 +3409,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, drm_gem_object_unreference(target_obj); } -#if WATCH_BUF - if (0) - i915_gem_dump_object(obj, 128, __func__, ~0); -#endif return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 26e67ee7f85d..2732c909a948 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -52,7 +52,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line) #endif /* WATCH_INACTIVE */ -#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE +#if WATCH_EXEC | WATCH_PWRITE static void i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, uint32_t bias, uint32_t mark) From 891b48cfc8659be486c70a03ad815f9a2485ee58 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Sep 2010 12:26:37 +0100 Subject: [PATCH 325/476] drm/i915: Avoid blocking the kworker thread on a stuck mutex Just reschedule the retire requests again if the device is currently busy. The request list will be pruned along other paths so will never grow unbounded and so we can afford to miss the occasional pruning. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9a8e6752e009..fe1424c6c3fa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1901,7 +1901,12 @@ i915_gem_retire_work_handler(struct work_struct *work) mm.retire_work.work); dev = dev_priv->dev; - mutex_lock(&dev->struct_mutex); + /* Come back later if the device is busy... */ + if (!mutex_trylock(&dev->struct_mutex)) { + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + return; + } + i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && From 23bc598253fa8e9ede6ad29304ea4ed177e9fc23 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Sep 2010 16:10:57 +0100 Subject: [PATCH 326/476] drm/i915/debug: Convert i915_verify_active() to scan all lists ... and check more regularly. 
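The checker follows the same pattern for every list: walk it, validate the per-list invariants, bump an error counter, and report each violation. Below is one of the walks, trimmed from the hunks that follow; call sites wrap the whole check in WARN_ON(i915_verify_lists(dev)), which the #else branch turns into a constant 0 when WATCH_LISTS is disabled:

	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		}
	}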
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 8 +- drivers/gpu/drm/i915/i915_gem.c | 29 +++---- drivers/gpu/drm/i915/i915_gem_debug.c | 109 +++++++++++++++++++++--- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 +- 4 files changed, 112 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cb4e9a63c835..5ccf98095389 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -77,7 +77,7 @@ enum plane { #define WATCH_COHERENCY 0 #define WATCH_EXEC 0 #define WATCH_RELOC 0 -#define WATCH_INACTIVE 0 +#define WATCH_LISTS 0 #define WATCH_PWRITE 0 #define I915_GEM_PHYS_CURSOR_0 1 @@ -1079,10 +1079,10 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); -#if WATCH_INACTIVE -void i915_verify_inactive(struct drm_device *dev, char *file, int line); +#if WATCH_LISTS +int i915_verify_lists(struct drm_device *dev); #else -#define i915_verify_inactive(dev, file, line) +#define i915_verify_lists(dev) 0 #endif void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); void i915_gem_dump_object(struct drm_gem_object *obj, int len, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fe1424c6c3fa..c3a7065947ce 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -109,6 +109,7 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) return -EAGAIN; } + WARN_ON(i915_verify_lists(dev)); return 0; } @@ -1612,7 +1613,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - i915_verify_inactive(dev, __FILE__, __LINE__); if (obj_priv->pin_count != 0) list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); else @@ -1626,7 +1626,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) obj_priv->active = 0; drm_gem_object_unreference(obj); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); } static void @@ -1821,6 +1821,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev, list_empty(&ring->request_list)) return; + WARN_ON(i915_verify_lists(dev)); + seqno = ring->get_seqno(dev, ring); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1865,6 +1867,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev, ring->user_irq_put(dev, ring); dev_priv->trace_irq_seqno = 0; } + + WARN_ON(i915_verify_lists(dev)); } void @@ -3690,8 +3694,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto pre_mutex_err; - i915_verify_inactive(dev, __FILE__, __LINE__); - if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); ret = -EBUSY; @@ -3811,8 +3813,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - i915_verify_inactive(dev, __FILE__, __LINE__); - /* Zero the global flush/invalidate flags. 
These * will be modified as new domains are computed * for each object @@ -3828,8 +3828,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_object_set_to_gpu_domain(obj); } - i915_verify_inactive(dev, __FILE__, __LINE__); - if (dev->invalidate_domains | dev->flush_domains) { #if WATCH_EXEC DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", @@ -3860,8 +3858,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, old_write_domain); } - i915_verify_inactive(dev, __FILE__, __LINE__); - #if WATCH_COHERENCY for (i = 0; i < args->buffer_count; i++) { i915_gem_object_check_coherency(object_list[i], @@ -3890,8 +3886,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ i915_retire_commands(dev, ring); - i915_verify_inactive(dev, __FILE__, __LINE__); - for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; obj_priv = to_intel_bo(obj); @@ -3902,8 +3896,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_add_request(dev, file_priv, request, ring); request = NULL; - i915_verify_inactive(dev, __FILE__, __LINE__); - err: for (i = 0; i < pinned; i++) i915_gem_object_unpin(object_list[i]); @@ -4094,8 +4086,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) int ret; BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); - - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); if (obj_priv->gtt_space != NULL) { if (alignment == 0) @@ -4129,8 +4120,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); return 0; } @@ -4141,7 +4132,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); obj_priv->pin_count--; BUG_ON(obj_priv->pin_count < 0); BUG_ON(obj_priv->gtt_space == NULL); @@ -4157,7 +4148,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) atomic_dec(&dev->pin_count); atomic_sub(obj->size, &dev->pin_memory); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); } int diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 2732c909a948..48644b840a8d 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -30,24 +30,107 @@ #include "i915_drm.h" #include "i915_drv.h" -#if WATCH_INACTIVE -void -i915_verify_inactive(struct drm_device *dev, char *file, int line) +#if WATCH_LISTS +int +i915_verify_lists(struct drm_device *dev) { + static int warned; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; + int err = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - obj = &obj_priv->base; - if (obj_priv->pin_count || obj_priv->active || - (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT))) - DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", + if (warned) + return 0; + + list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed render active %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.read_domains & 
I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid render active %p (a %d r %x)\n", obj, - obj_priv->pin_count, obj_priv->active, - obj->write_domain, file, line); + obj->active, + obj->base.read_domains); + err++; + } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) { + DRM_ERROR("invalid render active %p (w %x, gwl %d)\n", + obj, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } } + + list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed flushing %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || + list_empty(&obj->gpu_write_list)){ + DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", + obj, + obj->active, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed gpu write %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid gpu write %p (a %d w %x)\n", + obj, + obj->active, + obj->base.write_domain); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed inactive %p\n", obj); + err++; + break; + } else if (obj->pin_count || obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { + DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n", + obj, + obj->pin_count, obj->active, + obj->base.write_domain); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed pinned %p\n", obj); + err++; + break; + } else if (!obj->pin_count || obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { + DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n", + obj, + obj->pin_count, obj->active, + obj->base.write_domain); + err++; + } + } + + return warned = err; } #endif /* WATCH_INACTIVE */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 5103b95cea93..d89b88791aac 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -579,6 +579,8 @@ int intel_init_ring_buffer(struct drm_device *dev, int ret; ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(dev, ring); @@ -627,8 +629,6 @@ int intel_init_ring_buffer(struct drm_device *dev, if (ring->space < 0) ring->space += ring->size; } - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); return ret; err_unmap: From d21d5975686fbc107f9352006b06e1e92b4c5810 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 11:19:33 +0100 Subject: [PATCH 327/476] drm/i915: Report the deferred free list in debugfs Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index eb5dd52847a9..e4893988f401 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -45,7 +45,8 @@ enum { BSD_LIST, FLUSHING_LIST, 
INACTIVE_LIST, - PINNED_LIST + PINNED_LIST, + DEFERRED_FREE_LIST, }; static const char *yesno(int v) @@ -161,6 +162,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, "Flushing:\n"); head = &dev_priv->mm.flushing_list; break; + case DEFERRED_FREE_LIST: + seq_printf(m, "Deferred free:\n"); + head = &dev_priv->mm.deferred_free_list; + break; default: mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -991,6 +996,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, + {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, From 8f2480fb05991f1a5522dd48332cd9db4f7745c6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 11:44:19 +0100 Subject: [PATCH 328/476] drm/i915/debugfs: Include list totals Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e4893988f401..47e3a8fdcc6d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -135,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - int ret; + size_t total_obj_size, total_gtt_size; + int count, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -171,13 +172,19 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return -EINVAL; } + total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj_priv, head, list) { seq_printf(m, " "); describe_obj(m, obj_priv); seq_printf(m, "\n"); + total_obj_size += obj_priv->base.size; + total_gtt_size += obj_priv->gtt_space->size; + count++; } - mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, total_gtt_size); return 0; } From 5cdf58817433345157644140f2f509f00c06d479 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 Sep 2010 15:51:07 +0100 Subject: [PATCH 329/476] drm/i915: Make get/put pages static Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++++++++--------------- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 13 ++++++++++--- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 47e3a8fdcc6d..0d9bbd595ff8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -406,16 +406,19 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } -static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) +static void i915_dump_object(struct seq_file *m, + struct io_mapping *mapping, + struct drm_i915_gem_object *obj_priv) { - int page, i; - uint32_t *mem; + int page, page_count, i; + page_count = obj_priv->base.size / PAGE_SIZE; for (page = 0; page < page_count; page++) { - mem = kmap(pages[page]); + u32 *mem = io_mapping_map_wc(mapping, + 
obj_priv->gtt_offset + page * PAGE_SIZE); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); - kunmap(pages[page]); + io_mapping_unmap(mem); } } @@ -436,16 +439,9 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) list) { obj = &obj_priv->base; if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - ret = i915_gem_object_get_pages(obj, 0); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; - } - - seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); - i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); - - i915_gem_object_put_pages(obj); + seq_printf(m, "--- gtt_offset = 0x%08x\n", + obj_priv->gtt_offset); + i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); } } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5ccf98095389..66acc7c3bb03 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1055,8 +1055,6 @@ int i915_gem_attach_phys_object(struct drm_device *dev, void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); -int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); -void i915_gem_object_put_pages(struct drm_gem_object *obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); void i915_gem_shrinker_init(void); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c3a7065947ce..613b0bffde0f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -58,6 +58,13 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o struct drm_file *file_priv); static void i915_gem_free_object_tail(struct drm_gem_object *obj); +static int +i915_gem_object_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask); + +static void +i915_gem_object_put_pages(struct drm_gem_object *obj); + static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); @@ -1021,7 +1028,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj_priv->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); else if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0 && + obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); if (ret == -EFAULT) { @@ -1501,7 +1508,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return 0; } -void +static void i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -2174,7 +2181,7 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -int +static int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) { From f394940b8d275064f080a59dac636688dae3531a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Sep 2010 21:19:54 +0100 Subject: [PATCH 330/476] drm/i915: Remove redundant deletion of obj->gpu_write_list At that point as the object is no longer in any GPU write domain it must not be on the list, so the list_del() is redundant. 
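The reasoning, condensed from the hunk below: an object is only ever placed on mm.gpu_write_list while obj->write_domain still contains a GPU domain, so by the time the else branch ran the object was already off the list and the list_del_init() could never do anything:

	if (obj->write_domain)
		list_move_tail(&obj_priv->gpu_write_list,
			       &dev_priv->mm.gpu_write_list);
	else
		list_del_init(&obj_priv->gpu_write_list);	/* dropped: always a no-op here */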
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 613b0bffde0f..302beee03197 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3857,8 +3857,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj->write_domain) list_move_tail(&obj_priv->gpu_write_list, &dev_priv->mm.gpu_write_list); - else - list_del_init(&obj_priv->gpu_write_list); trace_i915_gem_object_change_domain(obj, obj->read_domains, From 6a04002bea137d2c6359228316d9c827806e475f Mon Sep 17 00:00:00 2001 From: Simon Que Date: Thu, 30 Sep 2010 09:36:39 +0100 Subject: [PATCH 331/476] i915: Added function to initialize VBT settings Added a function that sets the LVDS values to default settings. This will be called by intel_init_bios before checking for the VBT (video BIOS table). The default values are thus loaded regardless of whether a VBT is found. The default settings in each parse function have been moved to the new function. This consolidates all the default settings into one place. The default dither bit value has been changed from 0 to 1. We can assume that display devices will want dithering enabled. Signed-off-by: Simon Que Acked-by: Olof Johansson [ickle: fixup for -next] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_bios.c | 36 ++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 7e868d228c7b..b1f73ac0f3fd 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -129,10 +129,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, int i, temp_downclock; struct drm_display_mode *temp_mode; - /* Defaults if we can't find VBT info */ - dev_priv->lvds_dither = 0; - dev_priv->lvds_vbt = 0; - lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; @@ -140,6 +136,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->lvds_dither = lvds_options->pixel_dither; if (lvds_options->panel_type == 0xff) return; + panel_type = lvds_options->panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); @@ -232,8 +229,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; - dev_priv->sdvo_lvds_vbt_mode = NULL; - sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; @@ -262,10 +257,6 @@ parse_general_features(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; - /* Set sensible defaults in case we can't find the general block */ - dev_priv->int_tv_support = 1; - dev_priv->int_crt_support = 1; - general = find_section(bdb, BDB_GENERAL_FEATURES); if (general) { dev_priv->int_tv_support = general->int_tv_support; @@ -423,8 +414,6 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; - dev_priv->edp.bpp = 18; - edp = find_section(bdb, BDB_EDP); if (!edp) { if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { @@ -528,6 +517,27 @@ parse_device_mapping(struct drm_i915_private *dev_priv, return; } +static void +init_vbt_defaults(struct drm_i915_private *dev_priv) +{ + dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; + + /* LFP panel data */ + dev_priv->lvds_dither = 1; + dev_priv->lvds_vbt = 0; + + /* SDVO panel data */ + 
dev_priv->sdvo_lvds_vbt_mode = NULL; + + /* general features */ + dev_priv->int_tv_support = 1; + dev_priv->int_crt_support = 1; + dev_priv->lvds_use_ssc = 0; + + /* eDP data */ + dev_priv->edp.bpp = 18; +} + /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -545,7 +555,7 @@ intel_init_bios(struct drm_device *dev) struct bdb_header *bdb = NULL; u8 __iomem *bios = NULL; - dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; + init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? */ if (dev_priv->opregion.vbt) { From 73aa808f10effc280e6eb70267314542a7c29426 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 Sep 2010 11:46:12 +0100 Subject: [PATCH 332/476] drm: Move the GTT accounting to i915 Only drm/i915 does the bookkeeping that makes the information useful, and the information maintained is driver specific, so move it out of the core and into its single user. Signed-off-by: Chris Wilson Cc: Dave Airlie --- drivers/gpu/drm/drm_debugfs.c | 1 - drivers/gpu/drm/drm_gem.c | 14 ---- drivers/gpu/drm/drm_info.c | 14 ---- drivers/gpu/drm/drm_proc.c | 1 - drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++++ drivers/gpu/drm/i915/i915_drv.h | 9 +++ drivers/gpu/drm/i915/i915_gem.c | 110 +++++++++++++++++++--------- include/drm/drmP.h | 8 -- 8 files changed, 111 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 677b275fa721..9d8c892d07c9 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = { {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE {"vma", drm_vma_info, 0}, #endif diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index cff7317d3830..3ea0692ce59a 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev) spin_lock_init(&dev->object_name_lock); idr_init(&dev->object_name_idr); - atomic_set(&dev->object_count, 0); - atomic_set(&dev->object_memory, 0); - atomic_set(&dev->pin_count, 0); - atomic_set(&dev->pin_memory, 0); - atomic_set(&dev->gtt_count, 0); - atomic_set(&dev->gtt_memory, 0); mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); if (!mm) { @@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev, kref_init(&obj->handlecount); obj->size = size; - atomic_inc(&dev->object_count); - atomic_add(obj->size, &dev->object_memory); - return 0; } EXPORT_SYMBOL(drm_gem_object_init); @@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) return obj; fput: /* Object_init mangles the global counters - readjust them. 
*/ - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); fput(obj->filp); free: kfree(obj); @@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) void drm_gem_object_release(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; fput(obj->filp); - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); } EXPORT_SYMBOL(drm_gem_object_release); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 2ef2c7827243..5aff08e236cf 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data) return 0; } -int drm_gem_object_info(struct seq_file *m, void* data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - - seq_printf(m, "%d objects\n", atomic_read(&dev->object_count)); - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory)); - seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count)); - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory)); - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory)); - seq_printf(m, "%d gtt total\n", dev->gtt_total); - return 0; -} - #if DRM_DEBUG_CODE int drm_vma_info(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index e571de536dc5..9e5b07efebb7 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = { {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE {"vma", drm_vma_info, 0}, #endif diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0d9bbd595ff8..d598070fb279 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -188,6 +188,31 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } +static int i915_gem_object_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "%u objects\n", dev_priv->mm.object_count); + seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); + seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); + seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); + seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); + seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); + seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + + static int i915_gem_pageflip_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -994,6 +1019,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) static struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0, 0}, + {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST}, {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, diff --git 
a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 66acc7c3bb03..7cfbc0fbd952 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -642,6 +642,15 @@ typedef struct drm_i915_private { struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; uint32_t flush_rings; + + /* accounting, useful for userland debugging */ + size_t object_memory; + size_t pin_memory; + size_t gtt_memory; + size_t gtt_total; + u32 object_count; + u32 pin_count; + u32 gtt_count; } mm; struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 302beee03197..16c4b7b9602c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -68,6 +68,49 @@ i915_gem_object_put_pages(struct drm_gem_object *obj); static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); +/* some bookkeeping */ +static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count++; + dev_priv->mm.object_memory += size; +} + +static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count--; + dev_priv->mm.object_memory -= size; +} + +static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.gtt_count++; + dev_priv->mm.gtt_memory += size; +} + +static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.gtt_count--; + dev_priv->mm.gtt_memory -= size; +} + +static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.pin_count++; + dev_priv->mm.pin_memory += size; +} + +static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.pin_count--; + dev_priv->mm.pin_memory -= size; +} + int i915_gem_check_is_wedged(struct drm_device *dev) { @@ -128,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) obj_priv->pin_count == 0; } -int i915_gem_do_init(struct drm_device *dev, unsigned long start, +int i915_gem_do_init(struct drm_device *dev, + unsigned long start, unsigned long end) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -142,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); - dev->gtt_total = (uint32_t) (end - start); + dev_priv->mm.gtt_total = end - start; return 0; } @@ -165,14 +209,16 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - args->aper_size = dev->gtt_total; - args->aper_available_size = (args->aper_size - - atomic_read(&dev->pin_memory)); + mutex_lock(&dev->struct_mutex); + args->aper_size = dev_priv->mm.gtt_total; + args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; + mutex_unlock(&dev->struct_mutex); return 0; } @@ -2084,6 +2130,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; @@ -2116,25 +2163,18 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 
i915_gem_clear_fence_reg(obj); - if (obj_priv->agp_mem != NULL) { - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); - obj_priv->agp_mem = NULL; - } + drm_unbind_agp(obj_priv->agp_mem); + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); i915_gem_object_put_pages(obj); BUG_ON(obj_priv->pages_refcount); - if (obj_priv->gtt_space) { - atomic_dec(&dev->gtt_count); - atomic_sub(obj->size, &dev->gtt_memory); - - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - } - + i915_gem_info_remove_gtt(dev_priv, obj->size); list_del_init(&obj_priv->list); + drm_mm_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2619,7 +2659,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > dev->gtt_total) { + if (obj->size > dev_priv->mm.gtt_total) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; } @@ -2688,11 +2728,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) goto search_free; } - atomic_inc(&dev->gtt_count); - atomic_add(obj->size, &dev->gtt_memory); /* keep track of bounds object by adding it to the inactive list */ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + i915_gem_info_add_gtt(dev_priv, obj->size); /* Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -3779,15 +3818,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, pinned+1, args->buffer_count, total_size, num_fences, ret); - DRM_ERROR("%d objects [%d pinned], " - "%d object bytes [%d pinned], " - "%d/%d gtt bytes\n", - atomic_read(&dev->object_count), - atomic_read(&dev->pin_count), - atomic_read(&dev->object_memory), - atomic_read(&dev->pin_memory), - atomic_read(&dev->gtt_memory), - dev->gtt_total); + DRM_ERROR("%u objects [%u pinned, %u GTT], " + "%zu object bytes [%zu pinned], " + "%zu /%zu gtt bytes\n", + dev_priv->mm.object_count, + dev_priv->mm.pin_count, + dev_priv->mm.gtt_count, + dev_priv->mm.object_memory, + dev_priv->mm.pin_memory, + dev_priv->mm.gtt_memory, + dev_priv->mm.gtt_total); } goto err; } @@ -4119,8 +4159,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) * remove it from the inactive list */ if (obj_priv->pin_count == 1) { - atomic_inc(&dev->pin_count); - atomic_add(obj->size, &dev->pin_memory); + i915_gem_info_add_pin(dev_priv, obj->size); if (!obj_priv->active) list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); @@ -4150,8 +4189,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) if (!obj_priv->active) list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); - atomic_dec(&dev->pin_count); - atomic_sub(obj->size, &dev->pin_memory); + i915_gem_info_remove_pin(dev_priv, obj->size); } WARN_ON(i915_verify_lists(dev)); } @@ -4378,6 +4416,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); @@ -4389,6 +4428,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, return NULL; } + i915_gem_info_add_obj(dev_priv, size); + 
obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; @@ -4429,6 +4470,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) i915_gem_free_mmap_offset(obj); drm_gem_object_release(obj); + i915_gem_info_remove_obj(dev_priv, obj->size); kfree(obj_priv->page_cpu_valid); kfree(obj_priv->bit_17); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 30e827aeba02..bb5c41893c00 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1041,13 +1041,6 @@ struct drm_device { /*@{ */ spinlock_t object_name_lock; struct idr object_name_idr; - atomic_t object_count; - atomic_t object_memory; - atomic_t pin_count; - atomic_t pin_memory; - atomic_t gtt_count; - atomic_t gtt_memory; - uint32_t gtt_total; uint32_t invalidate_domains; /* domains pending invalidation */ uint32_t flush_domains; /* domains pending flush */ /*@} */ @@ -1378,7 +1371,6 @@ extern int drm_bufs_info(struct seq_file *m, void *data); extern int drm_vblank_info(struct seq_file *m, void *data); extern int drm_clients_info(struct seq_file *m, void* data); extern int drm_gem_name_info(struct seq_file *m, void *data); -extern int drm_gem_object_info(struct seq_file *m, void* data); #if DRM_DEBUG_CODE extern int drm_vma_info(struct seq_file *m, void *data); From 812ed4924328adf94f45c664b6a4c710a69167e2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 Sep 2010 15:08:57 +0100 Subject: [PATCH 333/476] drm/i915: Force the domain to CPU on unbinding whilst wedged. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30083 Reported-by: Sitsofe Wheeler Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 16c4b7b9602c..c033c5a2e9fc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2158,6 +2158,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) * should be safe and we need to cleanup or else we might * cause memory corruption through use-after-free. */ + if (ret) { + i915_gem_clflush_object(obj); + obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; + } /* release the fence reg _after_ flushing */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) From 069efc1dac477a4a51e42c0fe50bdcf85ada626a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 Sep 2010 16:53:18 +0100 Subject: [PATCH 334/476] drm/i915: Clear fence registers on GPU reset When the GPU is reset, the fence registers are invalidated, so release the objects and clear them out. 
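In code terms this boils down to walking the fence slots after a reset and dropping any slot that still references an object. A minimal sketch follows (hedged: it assumes the fence bookkeeping of this era, a fixed array of 16 struct drm_i915_fence_reg entries in dev_priv->fence_regs, and the helper name is illustrative only); the actual hunk is in the diff below.

static void sketch_clear_fences_after_reset(struct drm_i915_private *dev_priv)
{
        int i;

        /* The hardware contents of the fence registers do not survive the
         * reset, so any fence still bound to an object is now stale. */
        for (i = 0; i < 16; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

                if (!reg->obj)
                        continue;       /* slot not in use */

                i915_gem_clear_fence_reg(reg->obj);
        }
}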
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 14 +++++++++++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2184d29e7a9f..2109537d1b90 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -395,7 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags) mutex_lock(&dev->struct_mutex); - i915_gem_reset_lists(dev); + i915_gem_reset(dev); /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7cfbc0fbd952..d19a26af3f8e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1033,7 +1033,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_reset_lists(struct drm_device *dev); +void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c033c5a2e9fc..db9d36fb5883 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1826,10 +1826,11 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } } -void i915_gem_reset_lists(struct drm_device *dev) +void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; + int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); if (HAS_BSD(dev)) @@ -1858,6 +1859,17 @@ void i915_gem_reset_lists(struct drm_device *dev) { obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } + + /* The fence registers are invalidated so clear them out */ + for (i = 0; i < 16; i++) { + struct drm_i915_fence_reg *reg; + + reg = &dev_priv->fence_regs[i]; + if (!reg->obj) + continue; + + i915_gem_clear_fence_reg(reg->obj); + } } /** From dc96e9b8e37641d9d15a8a4cdd18ed7680d8f546 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 1 Oct 2010 12:05:06 +0100 Subject: [PATCH 335/476] drm/i915: Try to reset gen2 devices. So far only found registers for i830, i845, i865 and one of those has no effect on i865! At this moment in time, attempting to reset i8xx is a little optimistic... 
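For context, this handler slots into the per-generation dispatch that i915_reset() carries in this series; a sketch of that switch, pieced together from the hunks in these patches (gen 5 is the Ironlake path, gen 4 the i965 path, gen 2 the new i8xx path, anything else stays -ENODEV):

        ret = -ENODEV;
        switch (INTEL_INFO(dev)->gen) {
        case 5:
                ret = ironlake_do_reset(dev, flags);
                break;
        case 4:
                ret = i965_do_reset(dev, flags);
                break;
        case 2:
                ret = i8xx_do_reset(dev, flags);   /* added by this patch */
                break;
        }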
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 33 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 11 +++++++++++ 2 files changed, 44 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2109537d1b90..f3243a3abc37 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -342,6 +342,36 @@ int i915_resume(struct drm_device *dev) return 0; } +static int i8xx_do_reset(struct drm_device *dev, u8 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_I85X(dev)) + return -ENODEV; + + I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + if (IS_I830(dev) || IS_845G(dev)) { + I915_WRITE(DEBUG_RESET_I830, + DEBUG_RESET_DISPLAY | + DEBUG_RESET_RENDER | + DEBUG_RESET_FULL); + POSTING_READ(DEBUG_RESET_I830); + msleep(1); + + I915_WRITE(DEBUG_RESET_I830, 0); + POSTING_READ(DEBUG_RESET_I830); + } + + msleep(1); + + I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + return 0; +} + static int i965_reset_complete(struct drm_device *dev) { u8 gdrst; @@ -410,6 +440,9 @@ int i915_reset(struct drm_device *dev, u8 flags) case 4: ret = i965_do_reset(dev, flags); break; + case 2: + ret = i8xx_do_reset(dev, flags); + break; } if (ret) { DRM_ERROR("Failed to reset chip.\n"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ddbcd8c109e0..58cfea25a645 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -216,6 +216,16 @@ #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ + +/* + * Reset registers + */ +#define DEBUG_RESET_I830 0x6070 +#define DEBUG_RESET_FULL (1<<7) +#define DEBUG_RESET_RENDER (1<<8) +#define DEBUG_RESET_DISPLAY (1<<9) + + /* * Fence registers */ @@ -763,6 +773,7 @@ #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 +#define DSTATE_GFX_RESET_I830 (1<<6) #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) From 2fa772f34042cd4ddfb4ffaf5c24f0ce8c1025e9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 1 Oct 2010 13:23:27 +0100 Subject: [PATCH 336/476] drm/i915: Only print 'generating error event' if we actually are Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index aaa0f1b9d6e1..64c07c24e300 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -396,7 +396,6 @@ static void i915_error_work_func(struct work_struct *work) char *reset_event[] = { "RESET=1", NULL }; char *reset_done_event[] = { "ERROR=0", NULL }; - DRM_DEBUG_DRIVER("generating error event\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); if (atomic_read(&dev_priv->mm.wedged)) { @@ -574,8 +573,10 @@ static void i915_capture_error_state(struct drm_device *dev) return; } + DRM_DEBUG_DRIVER("generating error event\n"); + error->seqno = - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); From ae681d969ac0946e09636f2bef7a126d73e1ad6b Mon Sep 17 00:00:00 2001 From: Chris Wilson 
Date: Fri, 1 Oct 2010 14:57:56 +0100 Subject: [PATCH 337/476] drm/i915: If the GPU hangs twice within 5 seconds, declare it wedged. The issue is that we may become stuck executing a long running shader and continually attempt to reset the GPU. (Or maybe we tickle some bug and need to break the vicious cycle.) So if we are detect a second hang within 5 seconds, give up trying to programme the GPU and report it wedged. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 15 +++++++++------ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f3243a3abc37..c3decb2fef4b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -383,6 +383,11 @@ static int i965_do_reset(struct drm_device *dev, u8 flags) { u8 gdrst; + /* + * Set the domains we want to reset (GRDOM/bits 2 and 3) as + * well as the reset bit (GR/bit 0). Setting the GR bit + * triggers the reset; when done, the hardware will clear it. + */ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); @@ -427,13 +432,10 @@ int i915_reset(struct drm_device *dev, u8 flags) i915_gem_reset(dev); - /* - * Set the domains we want to reset (GRDOM/bits 2 and 3) as - * well as the reset bit (GR/bit 0). Setting the GR bit - * triggers the reset; when done, the hardware will clear it. - */ ret = -ENODEV; - switch (INTEL_INFO(dev)->gen) { + if (get_seconds() - dev_priv->last_gpu_reset < 5) { + DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); + } else switch (INTEL_INFO(dev)->gen) { case 5: ret = ironlake_do_reset(dev, flags); break; @@ -444,6 +446,7 @@ int i915_reset(struct drm_device *dev, u8 flags) ret = i8xx_do_reset(dev, flags); break; } + dev_priv->last_gpu_reset = get_seconds(); if (ret) { DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d19a26af3f8e..73ad8bff2c2a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -699,6 +699,8 @@ typedef struct drm_i915_private { struct drm_mm_node *compressed_fb; struct drm_mm_node *compressed_llb; + unsigned long last_gpu_reset; + /* list of fbdev register on this device */ struct intel_fbdev *fbdev; } drm_i915_private_t; From 1cdf7fef793c715d8c4998575aba3741fa4a0b01 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 2 Oct 2010 15:12:41 +0100 Subject: [PATCH 338/476] drm/i915: Don't mask the return code whilst relocating. The return from move_to_gtt_domain() may indicate a pending signal which needs to handled as opposed to an actual error, for instance, so report the original return value rather than forcing an EINVAL. 
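Concretely, the relocation path stops rewriting the callee's status. A sketch of the pattern (hedged: the set-to-GTT-domain call shown is assumed to be the one at that site), so that an -ERESTARTSYS from an interrupted wait can propagate to the ioctl layer and restart the syscall instead of surfacing as a bogus -EINVAL:

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret) {
                drm_gem_object_unreference(target_obj);
                i915_gem_object_unpin(obj);
                return ret;   /* was: return -EINVAL; */
        }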
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index db9d36fb5883..a78c97340605 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3454,7 +3454,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, if (ret != 0) { drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); - return -EINVAL; + return ret; } /* Map the page containing the relocation we're going to From f87ea7613126ace98c0cb8b86f58e16a0e539375 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sun, 3 Oct 2010 19:36:26 -0700 Subject: [PATCH 339/476] drm/i915: avoid struct mutex output_poll mutex lock loop on unload Cancel the output polling work proc before acquiring the struct mutex to avoid acquiring the work proc mutex with the struct mutex held. This avoids inverting the lock order seen when the work proc runs. Signed-off-by: Keith Packard Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2e8e15b8f5a..f55b560c468f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6127,9 +6127,9 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_crtc *crtc; struct intel_crtc *intel_crtc; + drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); - drm_kms_helper_poll_fini(dev); intel_fbdev_fini(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { From 35b62a89b0723ca05831f2edfff6deebe1806f21 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 26 Sep 2010 20:23:38 +0100 Subject: [PATCH 340/476] drm/i915: Skip pread/pwrite if size to copy is 0. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dba82022bd3e..29e97c075421 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -576,7 +576,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - int ret; + int ret = 0; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) @@ -586,14 +586,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, /* Bounds check source. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; - goto err; + goto out; } + if (args->size == 0) + goto out; + if (!access_ok(VERIFY_WRITE, (char __user *)(uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; - goto err; + goto out; } if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -605,7 +608,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, file_priv); } -err: +out: drm_gem_object_unreference_unlocked(obj); return ret; } @@ -1059,14 +1062,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, /* Bounds check destination. 
*/ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; - goto err; + goto out; } + if (args->size == 0) + goto out; + if (!access_ok(VERIFY_READ, (char __user *)(uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; - goto err; + goto out; } /* We can only do the GTT pwrite on untiled buffers, as otherwise @@ -1100,7 +1106,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite failed %d\n", ret); #endif -err: +out: drm_gem_object_unreference_unlocked(obj); return ret; } From 2c6be944111a873ce96865f1a6033056bdf0d0e2 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sun, 3 Oct 2010 13:33:49 -0700 Subject: [PATCH 341/476] drm/i915: mark display port DPMS state as 'ON' when enabling output The display port DPMS state is tracked internally in the display port driver so that when a hotplug event comes along, the driver can know whether to try retraining the link. This doesn't work well if the driver never sets the DPMS state to ON when the output is enabled. Signed-off-by: Keith Packard Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9e8fe122b0af..152d94507b79 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -933,6 +933,7 @@ static void intel_dp_commit(struct drm_encoder *encoder) if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_backlight_on(dev); + intel_dp->dpms_mode = DRM_MODE_DPMS_ON; } static void From 7b4f3990a22fbe800945f12001bc30db374d0af5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 4 Oct 2010 15:33:04 +0100 Subject: [PATCH 342/476] drm/i915: Avoid circular locking from intel_fbdev_fini() lockdep spots that the fb_info->lock takes the dev->struct_mutex during init (due to the device probing) and so we can not hold dev->struct_mutex when unregistering the framebuffer. Simply reverse the order of initialisation during cleanup and so do the intel_fbdev_fini() before the intel_modeset_cleanup. 
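Together with the earlier output-poll change, the unload path ends up doing all the work that can take fb_info->lock or the poll worker's mutex before struct_mutex is acquired; a rough sketch (illustrative function name, not the literal driver code):

static void sketch_kms_unload(struct drm_device *dev)
{
        /* fbdev unregistration can take fb_info->lock, which is acquired
         * before struct_mutex at init time, so do it first and unlocked */
        intel_fbdev_fini(dev);

        /* intel_modeset_cleanup() now stops the output poll worker before
         * it takes struct_mutex, avoiding the other lock inversion */
        intel_modeset_cleanup(dev);
}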
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/intel_display.c | 2 -- drivers/gpu/drm/i915/intel_fb.c | 8 +++----- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index df86d04f7968..726c3736082f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2106,6 +2106,7 @@ int i915_driver_unload(struct drm_device *dev) acpi_video_unregister(); if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_fbdev_fini(dev); intel_modeset_cleanup(dev); /* diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f55b560c468f..69c54c5a4254 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6130,8 +6130,6 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); - intel_fbdev_fini(dev); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7af4accafb7f..7dc50acd65d7 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -206,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intel_fb_find_or_create_single, }; -static int intel_fbdev_destroy(struct drm_device *dev, - struct intel_fbdev *ifbdev) +static void intel_fbdev_destroy(struct drm_device *dev, + struct intel_fbdev *ifbdev) { struct fb_info *info; struct intel_framebuffer *ifb = &ifbdev->ifb; @@ -225,9 +225,7 @@ static int intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) - drm_gem_object_unreference(ifb->obj); - - return 0; + drm_gem_object_unreference_unlocked(ifb->obj); } int intel_fbdev_init(struct drm_device *dev) From 7760fcb020b41352af4e675ce65a6aa0e93c170f Mon Sep 17 00:00:00 2001 From: Roy Spliet Date: Fri, 17 Sep 2010 23:17:24 +0200 Subject: [PATCH 343/476] drm/nouveau: Import initial memory timing work This isn't correct everywhere yet, but since we don't use the data yet it's perfectly safe to push in, and the information we gain from logs will help to fix the remaining issues. 
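As a reading aid for the parser below, the table layout it assumes (hedged, reverse engineered rather than documented): byte 0 is the version (only 0x10 is accepted), byte 1 the header length, byte 2 the entry count, byte 3 the per-entry record length (at least 15 bytes). The walk then looks roughly like:

        u8 *entry = mem + mem[1];   /* first record follows the header */
        int i;

        for (i = 0; i < mem[2]; i++, entry += mem[3]) {
                if (entry[0] == 0)
                        continue;   /* unpopulated slot */

                /* decode tRP, tRAS, tRFC, tRC and friends from fixed byte
                 * offsets and pack them into the reg_1002xx words */
        }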
v2 (Ben Skeggs ): - fixed up formatting - free parsed timing info on takedown - switched timing table printout to debug loglevel Signed-off-by: Roy Spliet Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 18 ++++ drivers/gpu/drm/nouveau/nouveau_mem.c | 144 ++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_pm.c | 4 +- drivers/gpu/drm/nouveau/nouveau_pm.h | 4 + 4 files changed, 169 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 799cd149745d..e1fb2c95eb90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -401,10 +401,28 @@ struct nouveau_pm_threshold_temp { s16 fan_boost; }; +struct nouveau_pm_memtiming { + u32 reg_100220; + u32 reg_100224; + u32 reg_100228; + u32 reg_10022c; + u32 reg_100230; + u32 reg_100234; + u32 reg_100238; + u32 reg_10023c; +}; + +struct nouveau_pm_memtimings { + bool supported; + struct nouveau_pm_memtiming *timing; + int nr_timing; +}; + struct nouveau_pm_engine { struct nouveau_pm_voltage voltage; struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; int nr_perflvl; + struct nouveau_pm_memtimings memtimings; struct nouveau_pm_temp_sensor_constants sensor_constants; struct nouveau_pm_threshold_temp threshold_temp; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2db01f80f38e..00b31b5e16cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -648,3 +648,147 @@ nouveau_mem_gart_init(struct drm_device *dev) return 0; } +void +nouveau_mem_timing_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_memtimings *memtimings = &pm->memtimings; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 tUNK_0, tUNK_1, tUNK_2; + u8 tRP; /* Byte 3 */ + u8 tRAS; /* Byte 5 */ + u8 tRFC; /* Byte 7 */ + u8 tRC; /* Byte 9 */ + u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; + u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; + u8 *mem = NULL, *entry; + int i, recordlen, entries; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + mem = ROMPTR(bios, P.data[4]); + else + if (P.version == 2) + mem = ROMPTR(bios, P.data[8]); + else { + NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); + } + } else { + NV_DEBUG(dev, "BMP version too old for memory\n"); + return; + } + + if (!mem) { + NV_DEBUG(dev, "memory timing table pointer invalid\n"); + return; + } + + if (mem[0] != 0x10) { + NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); + return; + } + + /* validate record length */ + entries = mem[2]; + recordlen = mem[3]; + if (recordlen < 15) { + NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]); + return; + } + + /* parse vbios entries into common format */ + memtimings->timing = + kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); + if (!memtimings->timing) + return; + + entry = mem + mem[1]; + for (i = 0; i < entries; i++, entry += recordlen) { + struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; + if (entry[0] == 0) + continue; + + tUNK_18 = 1; + tUNK_19 = 1; + tUNK_20 = 0; + tUNK_21 = 0; + switch (recordlen) { + case 0x21: + tUNK_21 = entry[21]; + case 0x20: + tUNK_20 = entry[20]; + case 0x19: + tUNK_19 = entry[19]; + case 0x18: + tUNK_18 = entry[18]; + default: + tUNK_0 = entry[0]; + tUNK_1 = entry[1]; + tUNK_2 = entry[2]; + tRP = 
entry[3]; + tRAS = entry[5]; + tRFC = entry[7]; + tRC = entry[9]; + tUNK_10 = entry[10]; + tUNK_11 = entry[11]; + tUNK_12 = entry[12]; + tUNK_13 = entry[13]; + tUNK_14 = entry[14]; + break; + } + + timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP); + + /* XXX: I don't trust the -1's and +1's... they must come + * from somewhere! */ + timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | + tUNK_18 << 16 | + (tUNK_1 + tUNK_19 + 1) << 8 | + (tUNK_2 - 1)); + + timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); + if(recordlen > 19) { + timing->reg_100228 += (tUNK_19 - 1) << 24; + } else { + timing->reg_100228 += tUNK_12 << 24; + } + + /* XXX: reg_10022c */ + + timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | + tUNK_13 << 8 | tUNK_13); + + /* XXX: +6? */ + timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); + if(tUNK_10 > tUNK_11) { + timing->reg_100234 += tUNK_10 << 16; + } else { + timing->reg_100234 += tUNK_11 << 16; + } + + /* XXX; reg_100238, reg_10023c */ + NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, + timing->reg_100220, timing->reg_100224, + timing->reg_100228, timing->reg_10022c); + NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", + timing->reg_100230, timing->reg_100234, + timing->reg_100238, timing->reg_10023c); + } + + memtimings->nr_timing = entries; + memtimings->supported = true; +} + +void +nouveau_mem_timing_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; + + kfree(mem->timing); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index b1d3f4b26ebd..01437f1753a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -451,6 +451,7 @@ nouveau_pm_init(struct drm_device *dev) nouveau_volt_init(dev); nouveau_perf_init(dev); nouveau_temp_init(dev); + nouveau_mem_timing_init(dev); NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); for (i = 0; i < pm->nr_perflvl; i++) { @@ -491,9 +492,10 @@ nouveau_pm_fini(struct drm_device *dev) if (pm->cur != &pm->boot) nouveau_pm_perflvl_set(dev, &pm->boot); + nouveau_mem_timing_fini(dev); + nouveau_temp_fini(dev); nouveau_perf_fini(dev); nouveau_volt_fini(dev); - nouveau_temp_fini(dev); nouveau_hwmon_fini(dev); nouveau_sysfs_fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 6ad0ca9db88f..7504e3b8c023 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -42,6 +42,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage); void nouveau_perf_init(struct drm_device *); void nouveau_perf_fini(struct drm_device *); +/* nouveau_mem.c */ +void nouveau_mem_timing_init(struct drm_device *); +void nouveau_mem_timing_fini(struct drm_device *); + /* nv04_pm.c */ int nv04_pm_clock_get(struct drm_device *, u32 id); void *nv04_pm_clock_pre(struct drm_device *, u32 id, int khz); From fe224bb7e12f1ea9a785ec942bc5d59950543888 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 27 Sep 2010 08:29:33 +1000 Subject: [PATCH 344/476] drm/nouveau: enable enhanced framing only if DP display supports it Reported-by: Adam Jackson Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dp.c | 7 +++++-- drivers/gpu/drm/nouveau/nouveau_encoder.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 
89ca1f6851a0..6bf3f714f84d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -317,7 +317,8 @@ train: return false; config[0] = nv_encoder->dp.link_nr; - if (nv_encoder->dp.dpcd_version >= 0x11) + if (nv_encoder->dp.dpcd_version >= 0x11 && + nv_encoder->dp.enhanced_frame) config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; ret = nouveau_dp_lane_count_set(encoder, config[0]); @@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder) !nv_encoder->dcb->dpconf.link_bw) nv_encoder->dp.link_bw = DP_LINK_BW_1_62; - nv_encoder->dp.link_nr = dpcd[2] & 0xf; + nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT; if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; + nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP); + return true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index 7c82d68bc155..ae69b61d93db 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -55,6 +55,7 @@ struct nouveau_encoder { int dpcd_version; int link_nr; int link_bw; + bool enhanced_frame; } dp; }; }; From 5c6dc6575460a0afe56d8cae7666e769e08ef942 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 27 Sep 2010 09:47:56 +1000 Subject: [PATCH 345/476] drm/nouveau: pass perflvl struct to clock_pre() On certain boards, there's BIOS scripts and memory timings that need to be modified with the memclk. Just pass in the entire perflvl struct and let the chipset-specific code decide what to do. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 ++- drivers/gpu/drm/nouveau/nouveau_pm.c | 13 +++++++------ drivers/gpu/drm/nouveau/nouveau_pm.h | 6 ++++-- drivers/gpu/drm/nouveau/nv04_pm.c | 3 ++- drivers/gpu/drm/nouveau/nv50_pm.c | 3 ++- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e1fb2c95eb90..01ee63a70cc9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -432,7 +432,8 @@ struct nouveau_pm_engine { struct device *hwmon; int (*clock_get)(struct drm_device *, u32 id); - void *(*clock_pre)(struct drm_device *, u32 id, int khz); + void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); void (*clock_set)(struct drm_device *, void *); int (*voltage_get)(struct drm_device *); int (*voltage_set)(struct drm_device *, int voltage); diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 01437f1753a7..1c99c55d6d46 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -31,7 +31,8 @@ #include static int -nouveau_pm_clock_set(struct drm_device *dev, u8 id, u32 khz) +nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u8 id, u32 khz) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; @@ -40,7 +41,7 @@ nouveau_pm_clock_set(struct drm_device *dev, u8 id, u32 khz) if (khz == 0) return 0; - pre_state = pm->clock_pre(dev, id, khz); + pre_state = pm->clock_pre(dev, perflvl, id, khz); if (IS_ERR(pre_state)) return PTR_ERR(pre_state); @@ -67,10 +68,10 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) } } - nouveau_pm_clock_set(dev, PLL_CORE, perflvl->core); - nouveau_pm_clock_set(dev, PLL_SHADER, perflvl->shader); - 
nouveau_pm_clock_set(dev, PLL_MEMORY, perflvl->memory); - nouveau_pm_clock_set(dev, PLL_UNK05, perflvl->unk05); + nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); + nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader); + nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory); + nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05); pm->cur = perflvl; return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 7504e3b8c023..babe64ac33c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -48,12 +48,14 @@ void nouveau_mem_timing_fini(struct drm_device *); /* nv04_pm.c */ int nv04_pm_clock_get(struct drm_device *, u32 id); -void *nv04_pm_clock_pre(struct drm_device *, u32 id, int khz); +void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); void nv04_pm_clock_set(struct drm_device *, void *); /* nv50_pm.c */ int nv50_pm_clock_get(struct drm_device *, u32 id); -void *nv50_pm_clock_pre(struct drm_device *, u32 id, int khz); +void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); void nv50_pm_clock_set(struct drm_device *, void *); /* nouveau_temp.c */ diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 61ca92744f93..6a6eb697d38e 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -39,7 +39,8 @@ nv04_pm_clock_get(struct drm_device *dev, u32 id) } void * -nv04_pm_clock_pre(struct drm_device *dev, u32 id, int khz) +nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) { struct nv04_pm_state *state; int ret; diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c index 64bc29c39c3a..eaf69c83ce92 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -67,7 +67,8 @@ nv50_pm_clock_get(struct drm_device *dev, u32 id) } void * -nv50_pm_clock_pre(struct drm_device *dev, u32 id, int khz) +nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) { struct nv50_pm_state *state; int dummy, ret; From aee582de806c7008756df23aa444c8e7d58004a9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 27 Sep 2010 10:13:23 +1000 Subject: [PATCH 346/476] drm/nouveau: run perflvl and M table scripts on mem clock change Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_perf.c | 1 + drivers/gpu/drm/nouveau/nv50_pm.c | 22 ++++++++++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 01ee63a70cc9..ef74d40d1bf1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -385,6 +385,8 @@ struct nouveau_pm_level { u8 voltage; u8 fanspeed; + + u16 memscript; }; struct nouveau_pm_temp_sensor_constants { diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 6b641b69cb77..5a95be654123 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -160,6 +160,7 @@ nouveau_perf_init(struct drm_device *dev) perflvl->memory = ROM16(entry[12]) * 1000; break; case 0x30: + perflvl->memscript = ROM16(entry[2]); case 0x35: perflvl->fanspeed = entry[6]; perflvl->voltage = entry[7]; diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c 
index eaf69c83ce92..2a9fabdf1b75 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "nouveau_drv.h" +#include "nouveau_bios.h" #include "nouveau_pm.h" /*XXX: boards using limits 0x40 need fixing, the register layout @@ -33,6 +34,7 @@ */ struct nv50_pm_state { + struct nouveau_pm_level *perflvl; struct pll_lims pll; enum pll_types type; int N, M, P; @@ -77,6 +79,7 @@ nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, if (!state) return ERR_PTR(-ENOMEM); state->type = id; + state->perflvl = perflvl; ret = get_pll_limits(dev, id, &state->pll); if (ret < 0) { @@ -98,11 +101,30 @@ void nv50_pm_clock_set(struct drm_device *dev, void *pre_state) { struct nv50_pm_state *state = pre_state; + struct nouveau_pm_level *perflvl = state->perflvl; u32 reg = state->pll.reg, tmp; + struct bit_entry BIT_M; + u16 script; int N = state->N; int M = state->M; int P = state->P; + if (state->type == PLL_MEMORY && perflvl->memscript && + bit_table(dev, 'M', &BIT_M) == 0 && + BIT_M.version == 1 && BIT_M.length >= 0x0b) { + script = ROM16(BIT_M.data[0x05]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + script = ROM16(BIT_M.data[0x07]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + script = ROM16(BIT_M.data[0x09]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + + nouveau_bios_run_init_table(dev, perflvl->memscript, NULL); + } + if (state->pll.vco2.maxfreq) { if (state->type == PLL_MEMORY) { nv_wr32(dev, 0x100210, 0); From fade7ad56d929e168ead0f75e591468afa2fe97c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 27 Sep 2010 11:18:14 +1000 Subject: [PATCH 347/476] drm/nva3: split pm backend out from nv50 This will end up quite different, it makes sense for it to be completely separate. 
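At this point the main functional difference between the two backends is the PLL read-back math; a hedged sketch of the two formulas as computed by nv50_pm_clock_get() and nva3_pm_clock_get() below (helper names are illustrative only):

static u32 sketch_nv50_pll_khz(u32 refclk, u32 N, u32 M, u32 P)
{
        return (refclk * N / M) >> P;   /* P acts as a log2 post-divider */
}

static u32 sketch_nva3_pll_khz(u32 refclk, u32 N, u32 M, u32 P)
{
        return refclk * N / M / P;      /* P is a plain divider, read from a
                                         * different bitfield of the PLL reg */
}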
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 4 +- drivers/gpu/drm/nouveau/nouveau_pm.h | 6 ++ drivers/gpu/drm/nouveau/nouveau_state.c | 18 ++++- drivers/gpu/drm/nouveau/nv50_pm.c | 52 +++++--------- drivers/gpu/drm/nouveau/nva3_pm.c | 95 +++++++++++++++++++++++++ 5 files changed, 135 insertions(+), 40 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nva3_pm.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index bdbde726778e..23fa82d667d6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -10,7 +10,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ nouveau_dp.o nouveau_ramht.o \ - nouveau_pm.o nouveau_volt.o nouveau_perf.o \ + nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ @@ -25,7 +25,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ - nv04_pm.o nv50_pm.o nouveau_temp.o + nv04_pm.o nv50_pm.o nva3_pm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index babe64ac33c0..4a9838ddacec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -58,6 +58,12 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, u32 id, int khz); void nv50_pm_clock_set(struct drm_device *, void *); +/* nva3_pm.c */ +int nva3_pm_clock_get(struct drm_device *, u32 id); +void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); +void nva3_pm_clock_set(struct drm_device *, void *); + /* nouveau_temp.c */ void nouveau_temp_init(struct drm_device *dev); void nouveau_temp_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index affcfc2fae19..75bce914e7b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -375,9 +375,21 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; - engine->pm.clock_get = nv50_pm_clock_get; - engine->pm.clock_pre = nv50_pm_clock_pre; - engine->pm.clock_set = nv50_pm_clock_set; + switch (dev_priv->chipset) { + case 0xa3: + case 0xa5: + case 0xa8: + case 0xaf: + engine->pm.clock_get = nva3_pm_clock_get; + engine->pm.clock_pre = nva3_pm_clock_pre; + engine->pm.clock_set = nva3_pm_clock_set; + break; + default: + engine->pm.clock_get = nv50_pm_clock_get; + engine->pm.clock_pre = nv50_pm_clock_pre; + engine->pm.clock_set = nv50_pm_clock_set; + break; + } engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; if (dev_priv->chipset >= 0x84) diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c index 2a9fabdf1b75..7dbb305d7e63 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -27,12 +27,6 @@ #include "nouveau_bios.h" #include "nouveau_pm.h" -/*XXX: boards using limits 0x40 need fixing, the register layout - * is correct here, but, 
there's some other funny magic - * that modifies things, so it's not likely we'll set/read - * the correct timings yet.. working on it... - */ - struct nv50_pm_state { struct nouveau_pm_level *perflvl; struct pll_lims pll; @@ -51,21 +45,13 @@ nv50_pm_clock_get(struct drm_device *dev, u32 id) if (ret) return ret; - if (pll.vco2.maxfreq) { - reg0 = nv_rd32(dev, pll.reg + 0); - reg1 = nv_rd32(dev, pll.reg + 4); - P = (reg0 & 0x00070000) >> 16; - N = (reg1 & 0x0000ff00) >> 8; - M = (reg1 & 0x000000ff); + reg0 = nv_rd32(dev, pll.reg + 0); + reg1 = nv_rd32(dev, pll.reg + 4); + P = (reg0 & 0x00070000) >> 16; + N = (reg1 & 0x0000ff00) >> 8; + M = (reg1 & 0x000000ff); - return ((pll.refclk * N / M) >> P); - } - - reg0 = nv_rd32(dev, pll.reg + 4); - P = (reg0 & 0x003f0000) >> 16; - N = (reg0 & 0x0000ff00) >> 8; - M = (reg0 & 0x000000ff); - return pll.refclk * N / M / P; + return ((pll.refclk * N / M) >> P); } void * @@ -125,23 +111,19 @@ nv50_pm_clock_set(struct drm_device *dev, void *pre_state) nouveau_bios_run_init_table(dev, perflvl->memscript, NULL); } - if (state->pll.vco2.maxfreq) { - if (state->type == PLL_MEMORY) { - nv_wr32(dev, 0x100210, 0); - nv_wr32(dev, 0x1002dc, 1); - } + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x100210, 0); + nv_wr32(dev, 0x1002dc, 1); + } - tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff; - tmp |= 0x80000000 | (P << 16); - nv_wr32(dev, reg + 0, tmp); - nv_wr32(dev, reg + 4, (N << 8) | M); + tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff; + tmp |= 0x80000000 | (P << 16); + nv_wr32(dev, reg + 0, tmp); + nv_wr32(dev, reg + 4, (N << 8) | M); - if (state->type == PLL_MEMORY) { - nv_wr32(dev, 0x1002dc, 0); - nv_wr32(dev, 0x100210, 0x80000000); - } - } else { - nv_wr32(dev, reg + 4, (P << 16) | (N << 8) | M); + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x1002dc, 0); + nv_wr32(dev, 0x100210, 0x80000000); } kfree(state); diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c new file mode 100644 index 000000000000..dbbafed36406 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_pm.c @@ -0,0 +1,95 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" + +/*XXX: boards using limits 0x40 need fixing, the register layout + * is correct here, but, there's some other funny magic + * that modifies things, so it's not likely we'll set/read + * the correct timings yet.. working on it... + */ + +struct nva3_pm_state { + struct pll_lims pll; + int N, M, P; +}; + +int +nva3_pm_clock_get(struct drm_device *dev, u32 id) +{ + struct pll_lims pll; + int P, N, M, ret; + u32 reg; + + ret = get_pll_limits(dev, id, &pll); + if (ret) + return ret; + + reg = nv_rd32(dev, pll.reg + 4); + P = (reg & 0x003f0000) >> 16; + N = (reg & 0x0000ff00) >> 8; + M = (reg & 0x000000ff); + return pll.refclk * N / M / P; +} + +void * +nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) +{ + struct nva3_pm_state *state; + int dummy, ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + ret = get_pll_limits(dev, id, &state->pll); + if (ret < 0) { + kfree(state); + return (ret == -ENOENT) ? NULL : ERR_PTR(ret); + } + + ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy, + &state->M, &state->P); + if (ret < 0) { + kfree(state); + return ERR_PTR(ret); + } + + return state; +} + +void +nva3_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct nva3_pm_state *state = pre_state; + u32 reg = state->pll.reg; + + nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M); + kfree(state); +} + From 85341f27b49833325f97580edcd687b4d90bb30b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 28 Sep 2010 10:03:57 +1000 Subject: [PATCH 348/476] drm/nouveau: fix typo in c2aa91afea5f7e7ae4530fabd37414a79c03328c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 6bf3f714f84d..4562f309ae3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -469,7 +469,7 @@ nouveau_dp_detect(struct drm_encoder *encoder) !nv_encoder->dcb->dpconf.link_bw) nv_encoder->dp.link_bw = DP_LINK_BW_1_62; - nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT; + nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; From 62acdc71cfbc816b26a57d89f433f8afaf4ff78d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 28 Sep 2010 10:23:20 +1000 Subject: [PATCH 349/476] drm/nouveau: fix required mode bandwidth calculation for DP This should fix eDP on certain laptops with 18-bit panels, we were rejecting the panel's native mode due to thinking there was insufficient bandwidth for it. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 18 +++++++++++++++++- drivers/gpu/drm/nouveau/nouveau_connector.h | 3 +++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 4b286a8c30c8..0871495096fa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder) return NULL; } +/*TODO: This could use improvement, and learn to handle the fixed + * BIOS tables etc. It's fine currently, for its only user. 
+ */ +int +nouveau_connector_bpp(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + + if (nv_connector->edid && nv_connector->edid->revision >= 4) { + u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4; + if (bpc > 4) + return bpc; + } + + return 18; +} static void nouveau_connector_destroy(struct drm_connector *drm_connector) @@ -666,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, else max_clock = nv_encoder->dp.link_nr * 162000; - clock *= 3; + clock = clock * nouveau_connector_bpp(connector) / 8; break; default: BUG_ON(1); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 0d2e668ccfe5..c21ed6b16f88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index); void nouveau_connector_set_polling(struct drm_connector *); +int +nouveau_connector_bpp(struct drm_connector *); + #endif /* __NOUVEAU_CONNECTOR_H__ */ From 2756a4f5df42bf19496ad7759032633ab826ea0e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 26 Sep 2010 17:33:50 +0200 Subject: [PATCH 350/476] drm/nouveau: Fix perf table parsing on BMP v5.25. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_perf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 5a95be654123..3be875f1d983 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -103,7 +103,7 @@ nouveau_perf_init(struct drm_device *dev) entries = perf[5]; } } else { - if (bios->data[bios->offset + 6] < 0x27) { + if (bios->data[bios->offset + 6] < 0x25) { legacy_perf_init(dev); return; } From 23357e4da0e1b39c9dfd64a1db0deafc6d70b554 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 28 Sep 2010 03:22:15 +0200 Subject: [PATCH 351/476] drm/nv30-nv40: Fix postdivider mask when writing engine/memory PLLs. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index ebcf8a8190c2..bed669a54a2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg, bool mpll = Preg == 0x4020; uint32_t oldPval = nvReadMC(dev, Preg); uint32_t NMNM = pv->NM2 << 16 | pv->NM1; - uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) | + uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) | 0xc << 28 | pv->log2P << 16; uint32_t saved4600 = 0; /* some cards have different maskc040s */ From cd2fb2e9e0a6a3273d353b18e4bdd21cc0482724 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 28 Sep 2010 20:47:58 +0200 Subject: [PATCH 352/476] drm/nv0x-nv4x: Leave the 0x40 bit untouched when changing CRE_LCD. It's an unrelated PLL filtering control bit, leave it alone when changing the CRTC-encoder binding. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_dac.c | 3 --- drivers/gpu/drm/nouveau/nv04_dfp.c | 4 +++- drivers/gpu/drm/nouveau/nv04_tv.c | 6 ++---- drivers/gpu/drm/nouveau/nv17_tv.c | 4 +--- drivers/gpu/drm/nouveau/nvreg.h | 1 + 5 files changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 9cc560c792a4..ba6423f2ffcc 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -345,14 +345,11 @@ static void nv04_dac_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *helper = encoder->helper_private; struct drm_device *dev = encoder->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; int head = nouveau_crtc(encoder->crtc)->index; - struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; helper->dpms(encoder, DRM_MODE_DPMS_OFF); nv04_dfp_disable(dev, head); - crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0; } static void nv04_dac_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 4b4f9aabde70..c936403b26e2 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head) } /* don't inadvertently turn it on when state written later */ crtcstate[head].fp_control = FP_TG_CONTROL_OFF; + crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= + ~NV_CIO_CRE_LCD_ROUTE_MASK; } void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) @@ -253,7 +255,7 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder) nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); - *cr_lcd = 0x3; + *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3; if (nv_two_heads(dev)) { if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index c8dc8a376ad9..3eb605ddfd03 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) state->tv_setup = 0; - if (bind) { - state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0; + if (bind) state->CRTC[NV_CIO_CRE_49] |= 0x10; - } else { + else state->CRTC[NV_CIO_CRE_49] &= ~0x10; - } NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, state->CRTC[NV_CIO_CRE_LCD__INDEX]); diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index a3b886166302..28119fd19d03 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -424,9 +424,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) } if (tv_norm->kind == CTV_ENC_MODE) - *cr_lcd = 0x1 | (head ? 0x0 : 0x8); - else - *cr_lcd = 0; + *cr_lcd |= 0x1 | (head ? 
0x0 : 0x8); /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index ad64673ace1f..881f8a585613 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -263,6 +263,7 @@ # define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2 # define NV_CIO_CRE_LCD__INDEX 0x33 # define NV_CIO_CRE_LCD_LCD_SELECT 0:0 +# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b # define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36 # define NV_CIO_CRE_DDC0_WR__INDEX 0x37 # define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */ From 62841ab726def838472dfaaee571a0d30a2ce1e0 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 30 Sep 2010 09:09:42 +1000 Subject: [PATCH 353/476] drm/nv50: prevent (IB_PUT == IB_GET) for occurring unless idle Should fix a DMA race condition I've never seen myself, but could be the culprit in some random hangs that have been reported. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 9d27acda87e2..eb24e2b05193 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -214,7 +214,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count) chan->dma.ib_free = get - chan->dma.ib_put; if (chan->dma.ib_free <= 0) - chan->dma.ib_free += chan->dma.ib_max + 1; + chan->dma.ib_free += chan->dma.ib_max; } return 0; From 0cba1b7644cbcd855d0a2b2ea4d8da26fd08dec4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcin=20Ko=C5=9Bcielnicki?= Date: Wed, 29 Sep 2010 11:15:01 +0000 Subject: [PATCH 354/476] drm/nouveau: Add a module option to force card POST. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 2 ++ drivers/gpu/drm/nouveau/nouveau_drv.c | 4 ++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + 3 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 03032528c8d6..c66d4567a2ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6872,6 +6872,8 @@ nouveau_bios_init(struct drm_device *dev) "running VBIOS init tables.\n"); bios->execute = true; } + if (nouveau_force_post) + bios->execute = true; ret = nouveau_run_vbios_init(dev); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index f919e411e39a..edc4a9ab28d1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -80,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); int nouveau_nofbaccel = 0; module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); +MODULE_PARM_DESC(force_post, "Force POST"); +int nouveau_force_post = 0; +module_param_named(force_post, nouveau_force_post, int, 0400); + MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); int nouveau_override_conntype = 0; module_param_named(override_conntype, nouveau_override_conntype, int, 0400); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ef74d40d1bf1..b209a6208757 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -755,6 +755,7 @@ extern char *nouveau_vbios; extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int nouveau_noaccel; +extern int nouveau_force_post; extern int nouveau_override_conntype; extern char 
*nouveau_perflvl; extern int nouveau_perflvl_wr; From 2730723bbc4a8b289fa536fc3555e15947da09c1 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 21 Sep 2010 18:57:11 +0200 Subject: [PATCH 355/476] drm/nouveau: Minor refactoring/cleanup of the fence code. Mainly to make room for inter-channel sync. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 15 ------ drivers/gpu/drm/nouveau/nouveau_channel.c | 4 +- drivers/gpu/drm/nouveau/nouveau_dma.c | 17 ------ drivers/gpu/drm/nouveau/nouveau_drv.h | 11 +++- drivers/gpu/drm/nouveau/nouveau_fence.c | 63 +++++++++++++++++------ drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +- 6 files changed, 59 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2148e2d73de3..03d842a06b04 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -36,21 +36,6 @@ #include #include -int -nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) -{ - struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; - int ret; - - if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) - return 0; - - spin_lock(&nvbo->bo.lock); - ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.lock); - return ret; -} - static void nouveau_bo_del_ttm(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index e01396747f6f..5eb4c966273f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -223,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_dma_init(chan); if (!ret) - ret = nouveau_fence_init(chan); + ret = nouveau_fence_channel_init(chan); if (ret) { nouveau_channel_free(chan); return ret; @@ -270,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan) * above attempts at idling were OK, but if we failed this'll tell TTM * we're done with the buffers. */ - nouveau_fence_fini(chan); + nouveau_fence_channel_fini(chan); /* This will prevent pfifo from switching channels. */ pfifo->reassign(dev, false); diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index eb24e2b05193..82581e600dcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -73,16 +73,6 @@ nouveau_dma_init(struct nouveau_channel *chan) if (ret) return ret; - /* Create an NV_SW object for various sync purposes */ - ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(chan, NvSw, obj); - nouveau_gpuobj_ref(NULL, &obj); - if (ret) - return ret; - /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); if (ret) @@ -110,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan) BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); OUT_RING(chan, NvNotify0); - /* Initialise NV_SW */ - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - BEGIN_RING(chan, NvSubSw, 0, 1); - OUT_RING(chan, NvSw); - /* Sit back and pray the channel works.. 
*/ FIRE_RING(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b209a6208757..9a9066f1fbcc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -705,6 +705,12 @@ struct drm_nouveau_private { struct apertures_struct *apertures; }; +static inline struct drm_nouveau_private * +nouveau_private(struct drm_device *dev) +{ + return dev->dev_private; +} + static inline struct drm_nouveau_private * nouveau_bdev(struct ttm_bo_device *bd) { @@ -1231,8 +1237,8 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); /* nouveau_fence.c */ struct nouveau_fence; -extern int nouveau_fence_init(struct nouveau_channel *); -extern void nouveau_fence_fini(struct nouveau_channel *); +extern int nouveau_fence_channel_init(struct nouveau_channel *); +extern void nouveau_fence_channel_fini(struct nouveau_channel *); extern void nouveau_fence_update(struct nouveau_channel *); extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, bool emit); @@ -1240,6 +1246,7 @@ extern int nouveau_fence_emit(struct nouveau_fence *); struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); extern bool nouveau_fence_signalled(void *obj, void *arg); extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); +extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); extern int nouveau_fence_flush(void *obj, void *arg); extern void nouveau_fence_unref(void **obj); extern void *nouveau_fence_ref(void *obj); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 87ac21ec23d2..62f13189698a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -30,7 +30,7 @@ #include "nouveau_drv.h" #include "nouveau_dma.h" -#define USE_REFCNT (dev_priv->card_type >= NV_10) +#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) struct nouveau_fence { struct nouveau_channel *channel; @@ -59,14 +59,13 @@ nouveau_fence_del(struct kref *ref) void nouveau_fence_update(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct list_head *entry, *tmp; - struct nouveau_fence *fence; + struct drm_device *dev = chan->dev; + struct nouveau_fence *tmp, *fence; uint32_t sequence; spin_lock(&chan->fence.lock); - if (USE_REFCNT) + if (USE_REFCNT(dev)) sequence = nvchan_rd32(chan, 0x48); else sequence = atomic_read(&chan->fence.last_sequence_irq); @@ -75,9 +74,7 @@ nouveau_fence_update(struct nouveau_channel *chan) goto out; chan->fence.sequence_ack = sequence; - list_for_each_safe(entry, tmp, &chan->fence.pending) { - fence = list_entry(entry, struct nouveau_fence, entry); - + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { sequence = fence->sequence; fence->signalled = true; list_del(&fence->entry); @@ -121,8 +118,8 @@ nouveau_fence_channel(struct nouveau_fence *fence) int nouveau_fence_emit(struct nouveau_fence *fence) { - struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; struct nouveau_channel *chan = fence->channel; + struct drm_device *dev = chan->dev; int ret; ret = RING_SPACE(chan, 2); @@ -143,7 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) list_add_tail(&fence->entry, &chan->fence.pending); spin_unlock(&chan->fence.lock); - BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); + BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 
0x0050 : 0x0150, 1); OUT_RING(chan, fence->sequence); FIRE_RING(chan); @@ -213,6 +210,19 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) return ret; } +int +nouveau_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *wchan) +{ + struct nouveau_channel *chan = nouveau_fence_channel(fence); + + if (likely(!fence || chan == wchan || + nouveau_fence_signalled(fence, NULL))) + return 0; + + return nouveau_fence_wait(fence, NULL, false, false); +} + int nouveau_fence_flush(void *sync_obj, void *sync_arg) { @@ -220,23 +230,42 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) } int -nouveau_fence_init(struct nouveau_channel *chan) +nouveau_fence_channel_init(struct nouveau_channel *chan) { + struct nouveau_gpuobj *obj = NULL; + int ret; + + /* Create an NV_SW object for various sync purposes */ + ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); + if (ret) + return ret; + + ret = nouveau_ramht_insert(chan, NvSw, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) + return ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + BEGIN_RING(chan, NvSubSw, 0, 1); + OUT_RING(chan, NvSw); + + FIRE_RING(chan); + INIT_LIST_HEAD(&chan->fence.pending); spin_lock_init(&chan->fence.lock); atomic_set(&chan->fence.last_sequence_irq, 0); + return 0; } void -nouveau_fence_fini(struct nouveau_channel *chan) +nouveau_fence_channel_fini(struct nouveau_channel *chan) { - struct list_head *entry, *tmp; - struct nouveau_fence *fence; - - list_for_each_safe(entry, tmp, &chan->fence.pending) { - fence = list_entry(entry, struct nouveau_fence, entry); + struct nouveau_fence *tmp, *fence; + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { fence->signalled = true; list_del(&fence->entry); kref_put(&fence->refcount, nouveau_fence_del); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ead7b8fc53fc..1e630987543e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -364,7 +364,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - ret = nouveau_bo_sync_gpu(nvbo, chan); + ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); if (unlikely(ret)) { NV_ERROR(dev, "fail pre-validate sync\n"); return ret; @@ -387,7 +387,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - ret = nouveau_bo_sync_gpu(nvbo, chan); + ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); if (unlikely(ret)) { NV_ERROR(dev, "fail post-validate sync\n"); return ret; From 8ac3891b48906b38db4b153c2d0d55db2ef81aee Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 21 Sep 2010 20:49:39 +0200 Subject: [PATCH 356/476] drm/nouveau: Provide a means to have arbitrary work run on fence completion. 
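The nouveau_fence_work() added below runs the callback immediately if the fence has already signalled, and otherwise attaches it to the fence (one callback per fence, enforced by the BUG_ON); nouveau_fence_update() later invokes it with signalled set to true, or nouveau_fence_channel_fini() with false if the channel is torn down first. A hypothetical caller, purely to illustrate the contract (scratch_free() and its buffer are invented for this sketch; the real first user is the semaphore code in the following patch):

        static void
        scratch_free(void *priv, bool signalled)
        {
                /* called exactly once, on fence completion or channel teardown */
                kfree(priv);
        }

        ...
        nouveau_fence_work(fence, scratch_free, scratch);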
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 +++ drivers/gpu/drm/nouveau/nouveau_fence.c | 30 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9a9066f1fbcc..d4f049d42ede 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1243,6 +1243,9 @@ extern void nouveau_fence_update(struct nouveau_channel *); extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, bool emit); extern int nouveau_fence_emit(struct nouveau_fence *); +extern void nouveau_fence_work(struct nouveau_fence *fence, + void (*work)(void *priv, bool signalled), + void *priv); struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); extern bool nouveau_fence_signalled(void *obj, void *arg); extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 62f13189698a..fbb2c3b26239 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -39,6 +39,9 @@ struct nouveau_fence { uint32_t sequence; bool signalled; + + void (*work)(void *priv, bool signalled); + void *priv; }; static inline struct nouveau_fence * @@ -78,6 +81,10 @@ nouveau_fence_update(struct nouveau_channel *chan) sequence = fence->sequence; fence->signalled = true; list_del(&fence->entry); + + if (unlikely(fence->work)) + fence->work(fence->priv, true); + kref_put(&fence->refcount, nouveau_fence_del); if (sequence == chan->fence.sequence_ack) @@ -147,6 +154,25 @@ nouveau_fence_emit(struct nouveau_fence *fence) return 0; } +void +nouveau_fence_work(struct nouveau_fence *fence, + void (*work)(void *priv, bool signalled), + void *priv) +{ + BUG_ON(fence->work); + + spin_lock(&fence->channel->fence.lock); + + if (fence->signalled) { + work(priv, true); + } else { + fence->work = work; + fence->priv = priv; + } + + spin_unlock(&fence->channel->fence.lock); +} + void nouveau_fence_unref(void **sync_obj) { @@ -268,6 +294,10 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { fence->signalled = true; list_del(&fence->entry); + + if (unlikely(fence->work)) + fence->work(fence->priv, false); + kref_put(&fence->refcount, nouveau_fence_del); } } From 0c6c1c2fb8b0fd4340f78db20ee7f35d2a810907 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 22 Sep 2010 00:58:54 +0200 Subject: [PATCH 357/476] drm/nouveau: Use semaphores to handle inter-channel sync in hardware. 
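The mechanism added below: nouveau_fence_sync() carves a 4-byte semaphore out of a shared, pinned VRAM buffer (dev_priv->fence.bo, handed out through a small drm_mm heap), has the waiting channel emit a semaphore ACQUIRE on it and the fence's own channel the matching RELEASE, and ties the semaphore's lifetime to a fence via the new nouveau_fence_work(). The wait therefore happens on the GPU instead of in a CPU wait loop; chipsets before NV17 (see USE_SEMA()) keep falling back to nouveau_fence_wait(). Per channel the emitted methods look roughly like this (condensed from emit_semaphore() below, NV50+ path; NvSema is the DMA object set up in nouveau_fence_channel_init()):

        BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
        OUT_RING  (chan, NvSema);               /* memory the semaphore lives in */
        BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
        OUT_RING  (chan, sema->mem->start);     /* offset of the 32-bit word */
        BEGIN_RING(chan, NvSubSw, method, 1);   /* NV_SW_SEMAPHORE_ACQUIRE or _RELEASE */
        OUT_RING  (chan, 1);                    /* value to wait for / to write */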
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dma.h | 1 + drivers/gpu/drm/nouveau/nouveau_drv.h | 8 + drivers/gpu/drm/nouveau/nouveau_fence.c | 195 +++++++++++++++++++++++- drivers/gpu/drm/nouveau/nouveau_state.c | 11 +- 4 files changed, 212 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 8b05c15866d5..d578c21d3c8d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -72,6 +72,7 @@ enum { NvGdiRect = 0x8000000c, NvImageBlit = 0x8000000d, NvSw = 0x8000000e, + NvSema = 0x8000000f, /* G80+ display objects */ NvEvoVRAM = 0x01000000, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d4f049d42ede..a308c132c19b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -622,6 +622,12 @@ struct drm_nouveau_private { atomic_t validate_sequence; } ttm; + struct { + spinlock_t lock; + struct drm_mm heap; + struct nouveau_bo *bo; + } fence; + int fifo_alloc_count; struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; @@ -1237,6 +1243,8 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); /* nouveau_fence.c */ struct nouveau_fence; +extern int nouveau_fence_init(struct drm_device *); +extern void nouveau_fence_fini(struct drm_device *); extern int nouveau_fence_channel_init(struct nouveau_channel *); extern void nouveau_fence_channel_fini(struct nouveau_channel *); extern void nouveau_fence_update(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index fbb2c3b26239..f42675cc9d14 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -28,9 +28,11 @@ #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" #include "nouveau_dma.h" #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) +#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) struct nouveau_fence { struct nouveau_channel *channel; @@ -44,6 +46,12 @@ struct nouveau_fence { void *priv; }; +struct nouveau_semaphore { + struct kref ref; + struct drm_device *dev; + struct drm_mm_node *mem; +}; + static inline struct nouveau_fence * nouveau_fence(void *sync_obj) { @@ -236,17 +244,128 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) return ret; } +static struct nouveau_semaphore * +alloc_semaphore(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_semaphore *sema; + + if (!USE_SEMA(dev)) + return NULL; + + sema = kmalloc(sizeof(*sema), GFP_KERNEL); + if (!sema) + goto fail; + + spin_lock(&dev_priv->fence.lock); + sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); + if (sema->mem) + sema->mem = drm_mm_get_block(sema->mem, 4, 0); + spin_unlock(&dev_priv->fence.lock); + + if (!sema->mem) + goto fail; + + kref_init(&sema->ref); + sema->dev = dev; + nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0); + + return sema; +fail: + kfree(sema); + return NULL; +} + +static void +free_semaphore(struct kref *ref) +{ + struct nouveau_semaphore *sema = + container_of(ref, struct nouveau_semaphore, ref); + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + + spin_lock(&dev_priv->fence.lock); + drm_mm_put_block(sema->mem); + spin_unlock(&dev_priv->fence.lock); + + kfree(sema); +} + +static void +semaphore_work(void *priv, 
bool signalled) +{ + struct nouveau_semaphore *sema = priv; + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + + if (unlikely(!signalled)) + nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1); + + kref_put(&sema->ref, free_semaphore); +} + +static int +emit_semaphore(struct nouveau_channel *chan, int method, + struct nouveau_semaphore *sema) +{ + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + struct nouveau_fence *fence; + int ret; + + ret = RING_SPACE(chan, dev_priv->card_type >= NV_50 ? 6 : 4); + if (ret) + return ret; + + if (dev_priv->card_type >= NV_50) { + BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); + OUT_RING(chan, NvSema); + } + BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1); + OUT_RING(chan, sema->mem->start); + BEGIN_RING(chan, NvSubSw, method, 1); + OUT_RING(chan, 1); + + /* Delay semaphore destruction until its work is done */ + ret = nouveau_fence_new(chan, &fence, true); + if (ret) + return ret; + + kref_get(&sema->ref); + nouveau_fence_work(fence, semaphore_work, sema); + nouveau_fence_unref((void *)&fence); + + return 0; +} + int nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *wchan) { struct nouveau_channel *chan = nouveau_fence_channel(fence); + struct drm_device *dev = wchan->dev; + struct nouveau_semaphore *sema; + int ret; if (likely(!fence || chan == wchan || nouveau_fence_signalled(fence, NULL))) return 0; - return nouveau_fence_wait(fence, NULL, false, false); + sema = alloc_semaphore(dev); + if (!sema) { + /* Early card or broken userspace, fall back to + * software sync. */ + return nouveau_fence_wait(fence, NULL, false, false); + } + + /* Signal the semaphore from chan */ + ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); + if (ret) + goto out; + + /* Make wchan wait until it gets signalled */ + ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); + +out: + kref_put(&sema->ref, free_semaphore); + return ret; } int @@ -258,6 +377,8 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) int nouveau_fence_channel_init(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *obj = NULL; int ret; @@ -277,6 +398,30 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) BEGIN_RING(chan, NvSubSw, 0, 1); OUT_RING(chan, NvSw); + /* Create a DMA object for the shared cross-channel sync area. */ + if (USE_SEMA(dev)) { + struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node; + + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + mem->start << PAGE_SHIFT, + mem->size << PAGE_SHIFT, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM, &obj); + if (ret) + return ret; + + ret = nouveau_ramht_insert(chan, NvSema, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) + return ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); + OUT_RING(chan, NvSema); + } + FIRE_RING(chan); INIT_LIST_HEAD(&chan->fence.pending); @@ -302,3 +447,51 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) } } +int +nouveau_fence_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + /* Create a shared VRAM heap for cross-channel sync. 
*/ + if (USE_SEMA(dev)) { + ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, + 0, 0, false, true, &dev_priv->fence.bo); + if (ret) + return ret; + + ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM); + if (ret) + goto fail; + + ret = nouveau_bo_map(dev_priv->fence.bo); + if (ret) + goto fail; + + ret = drm_mm_init(&dev_priv->fence.heap, 0, + dev_priv->fence.bo->bo.mem.size); + if (ret) + goto fail; + + spin_lock_init(&dev_priv->fence.lock); + } + + return 0; +fail: + nouveau_bo_unmap(dev_priv->fence.bo); + nouveau_bo_ref(NULL, &dev_priv->fence.bo); + return ret; +} + +void +nouveau_fence_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (USE_SEMA(dev)) { + drm_mm_takedown(&dev_priv->fence.heap); + nouveau_bo_unmap(dev_priv->fence.bo); + nouveau_bo_unpin(dev_priv->fence.bo); + nouveau_bo_ref(NULL, &dev_priv->fence.bo); + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 75bce914e7b5..ed7757f14083 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -639,9 +639,13 @@ nouveau_card_init(struct drm_device *dev) /* what about PVIDEO/PCRTC/PRAMDAC etc? */ if (!engine->graph.accel_blocked) { - ret = nouveau_card_init_channel(dev); + ret = nouveau_fence_init(dev); if (ret) goto out_irq; + + ret = nouveau_card_init_channel(dev); + if (ret) + goto out_fence; } ret = nouveau_backlight_init(dev); @@ -652,6 +656,8 @@ nouveau_card_init(struct drm_device *dev) drm_kms_helper_poll_init(dev); return 0; +out_fence: + nouveau_fence_fini(dev); out_irq: drm_irq_uninstall(dev); out_display: @@ -695,7 +701,8 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_backlight_exit(dev); - if (dev_priv->channel) { + if (!engine->graph.accel_blocked) { + nouveau_fence_fini(dev); nouveau_channel_free(dev_priv->channel); dev_priv->channel = NULL; } From 647988175234a733cc0d4cf968949344803a77a7 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Tue, 21 Sep 2010 19:02:01 +0200 Subject: [PATCH 358/476] drm/nouveau: Synchronize buffer object moves in hardware. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 03d842a06b04..4e813638bdb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -454,10 +454,15 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, - evict || (nvbo->channel && - nvbo->channel != chan), + if (nvbo->channel) { + ret = nouveau_fence_sync(fence, nvbo->channel); + if (ret) + goto out; + } + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); +out: nouveau_fence_unref((void *)&fence); return ret; } From 8af29ccd7917ab448ea7b5cf581fa7b2b4ea3cba Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sat, 2 Oct 2010 17:04:46 +0200 Subject: [PATCH 359/476] drm/nv50: Fix large 3D performance regression caused by the interchannel sync patches. 
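In short: NV50's PFIFO does not service semaphores first come, first served, so an ACQUIRE could stall for up to a full context timeslice while the matching RELEASE sat queued on another channel (the typical DRI2 case with several outstanding semaphores). The fix below makes the waiting channel queue its ACQUIRE before the RELEASE is emitted, and adds the NV_SW_YIELD method to force a channel switch, placed in front of an ACQUIRE and right after a RELEASE:

        /* before an ACQUIRE (and, mirrored, after a RELEASE): */
        BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
        OUT_RING  (chan, 0);
        BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_ACQUIRE, 1);
        OUT_RING  (chan, 1);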
Reported-by: Christoph Bumiller Signed-off-by: Francisco Jerez Tested-by: Maarten Maathuis Tested-by: Xavier Chantry Tested-by: Ben Skeggs Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_fence.c | 46 +++++++++++++++++++++---- 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a308c132c19b..3a07e580d27a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1473,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device, #define NV_SW_SEMAPHORE_OFFSET 0x00000064 #define NV_SW_SEMAPHORE_ACQUIRE 0x00000068 #define NV_SW_SEMAPHORE_RELEASE 0x0000006c +#define NV_SW_YIELD 0x00000080 #define NV_SW_DMA_VBLSEM 0x0000018c #define NV_SW_VBLSEM_OFFSET 0x00000400 #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index f42675cc9d14..441b12420bb1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -308,21 +308,54 @@ emit_semaphore(struct nouveau_channel *chan, int method, { struct drm_nouveau_private *dev_priv = sema->dev->dev_private; struct nouveau_fence *fence; + bool smart = (dev_priv->card_type >= NV_50); int ret; - ret = RING_SPACE(chan, dev_priv->card_type >= NV_50 ? 6 : 4); + ret = RING_SPACE(chan, smart ? 8 : 4); if (ret) return ret; - if (dev_priv->card_type >= NV_50) { + if (smart) { BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); OUT_RING(chan, NvSema); } BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1); OUT_RING(chan, sema->mem->start); + + if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) { + /* + * NV50 tries to be too smart and context-switch + * between semaphores instead of doing a "first come, + * first served" strategy like previous cards + * do. + * + * That's bad because the ACQUIRE latency can get as + * large as the PFIFO context time slice in the + * typical DRI2 case where you have several + * outstanding semaphores at the same moment. + * + * If we're going to ACQUIRE, force the card to + * context switch before, just in case the matching + * RELEASE is already scheduled to be executed in + * another channel. + */ + BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1); + OUT_RING(chan, 0); + } + BEGIN_RING(chan, NvSubSw, method, 1); OUT_RING(chan, 1); + if (smart && method == NV_SW_SEMAPHORE_RELEASE) { + /* + * Force the card to context switch, there may be + * another channel waiting for the semaphore we just + * released. 
+ */ + BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1); + OUT_RING(chan, 0); + } + /* Delay semaphore destruction until its work is done */ ret = nouveau_fence_new(chan, &fence, true); if (ret) @@ -355,14 +388,13 @@ nouveau_fence_sync(struct nouveau_fence *fence, return nouveau_fence_wait(fence, NULL, false, false); } - /* Signal the semaphore from chan */ - ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); + /* Make wchan wait until it gets signalled */ + ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); if (ret) goto out; - /* Make wchan wait until it gets signalled */ - ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); - + /* Signal the semaphore from chan */ + ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); out: kref_put(&sema->ref, free_semaphore); return ret; From eadc69cc9054594ff7860d407f855536af13af99 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Sun, 26 Sep 2010 20:26:02 +0100 Subject: [PATCH 360/476] drm/nouveau: don't use the default pll limits in table v2.1 on nv50+ cards This fixes issues bug 30370 and prevents another possible divide by zero on the original nv50 cards, by returning -ENOENT Signed-off-by: Emil Velikov Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index c66d4567a2ed..53f4eba65cb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -4890,6 +4890,12 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims break; } + if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) { + NV_ERROR(dev, "Register 0x%08x not found in PLL " + "limits table", pll_lim->reg); + return -ENOENT; + } + pll_rec = &bios->data[plloffs + recordlen * pllindex]; BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n", From ca8e7c6ccdfb9a77de9b9719e6ef768373fb607c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 4 Oct 2010 15:27:58 +1000 Subject: [PATCH 361/476] drm/nouveau: parse voltage from perf 0x40 entires This was disabled previously because of some uncertainty that +2 was indeed the voltage. It appears it is, checked on a NVA8 and a NVA3M. 
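For reference, a version-0x40 performance entry now decodes as below, with subent(n) expanding to entry[perf[2] + (n) * perf[3]], i.e. the n-th sub-entry of the record. The example clock value is invented, and the *1000 suggests the 12-bit clock fields are MHz stored as kHz; fanspeed is still left unparsed:

        perflvl->voltage = entry[2];                            /* the '+2' mentioned above */
        perflvl->core    = (ROM16(subent(0)) & 0xfff) * 1000;   /* e.g. 0x1a4 -> 420 MHz */
        perflvl->shader  = (ROM16(subent(1)) & 0xfff) * 1000;
        perflvl->memory  = (ROM16(subent(2)) & 0xfff) * 1000;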
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_perf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 3be875f1d983..ac62a1b8c4fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -173,7 +173,7 @@ nouveau_perf_init(struct drm_device *dev) case 0x40: #define subent(n) entry[perf[2] + ((n) * perf[3])] perflvl->fanspeed = 0; /*XXX*/ - perflvl->voltage = 0; /*XXX: entry[2] */; + perflvl->voltage = entry[2]; perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; From a845fff841b13678b2d80f10425aba87db840e4a Mon Sep 17 00:00:00 2001 From: Roy Spliet Date: Mon, 4 Oct 2010 23:01:08 +0200 Subject: [PATCH 362/476] drm/nouveau: fix thinkos in mem timing table recordlen check Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 00b31b5e16cd..a163c7c612e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -35,6 +35,8 @@ #include "drm_sarea.h" #include "nouveau_drv.h" +#define MIN(a,b) a < b ? a : b + /* * NV10-NV40 tiling helpers */ @@ -717,14 +719,14 @@ nouveau_mem_timing_init(struct drm_device *dev) tUNK_19 = 1; tUNK_20 = 0; tUNK_21 = 0; - switch (recordlen) { - case 0x21: + switch (MIN(recordlen,21)) { + case 21: tUNK_21 = entry[21]; - case 0x20: + case 20: tUNK_20 = entry[20]; - case 0x19: + case 19: tUNK_19 = entry[19]; - case 0x18: + case 18: tUNK_18 = entry[18]; default: tUNK_0 = entry[0]; From 42311ff90dc8746bd81427b2ed6efda9af791b77 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 4 Aug 2010 12:07:08 +1000 Subject: [PATCH 363/476] drm/ttm: introduce utility function to free an allocated memory node MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Existing core code/drivers call drm_mm_put_block on ttm_mem_reg.mm_node directly. Future patches will modify TTM behaviour in such a way that ttm_mem_reg.mm_node doesn't necessarily belong to drm_mm. 
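Concretely, the open-coded cleanup the nouveau and radeon move paths carried,

        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

collapses into one call to the new helper, which takes the same lock and also clears mm_node:

        ttm_bo_mem_put(bo, &tmp_mem);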
Reviewed-by: Jerome Glisse Acked-by: Thomas Hellström Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 14 ++------------ drivers/gpu/drm/radeon/radeon_ttm.c | 18 ++---------------- drivers/gpu/drm/ttm/ttm_bo.c | 26 +++++++++++++++----------- drivers/gpu/drm/ttm/ttm_bo_util.c | 9 +-------- include/drm/ttm/ttm_bo_driver.h | 4 ++++ 5 files changed, 24 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4e813638bdb7..f685f392c226 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -693,12 +693,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); out: - if (tmp_mem.mm_node) { - spin_lock(&bo->bdev->glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&bo->bdev->glob->lru_lock); - } - + ttm_bo_mem_put(bo, &tmp_mem); return ret; } @@ -731,12 +726,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; out: - if (tmp_mem.mm_node) { - spin_lock(&bo->bdev->glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&bo->bdev->glob->lru_lock); - } - + ttm_bo_mem_put(bo, &tmp_mem); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 84c53e41a88f..cc19aba9bb74 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -326,14 +326,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: - if (tmp_mem.mm_node) { - struct ttm_bo_global *glob = rdev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&glob->lru_lock); - return r; - } + ttm_bo_mem_put(bo, &tmp_mem); return r; } @@ -372,14 +365,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, goto out_cleanup; } out_cleanup: - if (tmp_mem.mm_node) { - struct ttm_bo_global *glob = rdev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&glob->lru_lock); - return r; - } + ttm_bo_mem_put(bo, &tmp_mem); return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cb4cf7ef4d1e..80d37b460a8c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -475,11 +475,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) list_del_init(&bo->ddestroy); ++put_count; } - if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } spin_unlock(&glob->lru_lock); + ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); @@ -621,7 +618,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bo->glob; struct ttm_mem_reg evict_mem; struct ttm_placement placement; int ret = 0; @@ -667,12 +663,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, if (ret) { if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); - spin_lock(&glob->lru_lock); - if (evict_mem.mm_node) { - drm_mm_put_block(evict_mem.mm_node); - evict_mem.mm_node = NULL; - } - spin_unlock(&glob->lru_lock); + ttm_bo_mem_put(bo, &evict_mem); goto out; } bo->evicted = true; @@ -769,6 +760,19 @@ static int ttm_bo_man_get_node(struct 
ttm_buffer_object *bo, return 0; } +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) +{ + struct ttm_bo_global *glob = bo->glob; + + if (mem->mm_node) { + spin_lock(&glob->lru_lock); + drm_mm_put_block(mem->mm_node); + spin_unlock(&glob->lru_lock); + mem->mm_node = NULL; + } +} +EXPORT_SYMBOL(ttm_bo_mem_put); + /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e04232..0ebfe0d94931 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -39,14 +39,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { - struct ttm_mem_reg *old_mem = &bo->mem; - - if (old_mem->mm_node) { - spin_lock(&bo->glob->lru_lock); - drm_mm_put_block(old_mem->mm_node); - spin_unlock(&bo->glob->lru_lock); - } - old_mem->mm_node = NULL; + ttm_bo_mem_put(bo, &bo->mem); } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index b87504235f18..6c694d86e03d 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -649,6 +649,10 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_reserve, bool no_wait_gpu); + +extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); + /** * ttm_bo_wait_for_cpu * From d961db75ce86a84f1f04e91ad1014653ed7d9f46 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 5 Aug 2010 10:48:18 +1000 Subject: [PATCH 364/476] drm/ttm: restructure to allow driver to plug in alternate memory manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Nouveau will need this on GeForce 8 and up to account for the GPU reordering physical VRAM for some memory types. 
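After this restructuring the range-manager logic lives behind a per-memory-type function table: ttm_bo.c only ever calls (*man->func->init/get_node/put_node/takedown/debug)(), the old drm_mm-based implementation moves into the new ttm_bo_manager.c as ttm_bo_manager_func, placement offsets come from the new ttm_mem_reg.start field instead of mm_node->start, and drivers pick a manager in their init_mem_type() hook. A driver wanting its own VRAM allocator would plug in something like this (sketch; the function-table type name and the my_vram_* callbacks are assumptions, only the five ops and their call sites appear in the hunks below):

        static const struct ttm_mem_type_manager_func my_vram_manager_func = {
                .init     = my_vram_init,      /* set up allocator state, stash it in man->priv */
                .takedown = my_vram_takedown,
                .get_node = my_vram_get_node,  /* fill mem->mm_node and mem->start */
                .put_node = my_vram_put_node,
                .debug    = my_vram_debug,
        };

        /* then, in the driver's init_mem_type() hook for that memory type: */
        man->func = &my_vram_manager_func;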
Reviewed-by: Jerome Glisse Acked-by: Thomas Hellström Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 18 +-- drivers/gpu/drm/nouveau/nouveau_channel.c | 6 +- drivers/gpu/drm/nouveau/nouveau_notifier.c | 2 +- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 4 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 3 +- drivers/gpu/drm/nouveau/nv50_display.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 2 +- drivers/gpu/drm/nouveau/nvc0_instmem.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 6 +- drivers/gpu/drm/radeon/radeon_ttm.c | 16 ++- drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_agp_backend.c | 3 +- drivers/gpu/drm/ttm/ttm_bo.c | 100 +++----------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 148 +++++++++++++++++++++ drivers/gpu/drm/ttm/ttm_bo_util.c | 3 +- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 3 +- include/drm/ttm/ttm_bo_api.h | 3 +- include/drm/ttm/ttm_bo_driver.h | 21 ++- 18 files changed, 229 insertions(+), 116 deletions(-) create mode 100644 drivers/gpu/drm/ttm/ttm_bo_manager.c diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f685f392c226..80353e2b8409 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -381,6 +381,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | @@ -392,6 +393,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->gpu_offset = 0; break; case TTM_PL_TT: + man->func = &ttm_bo_manager_func; switch (dev_priv->gart_info.type) { case NOUVEAU_GART_AGP: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; @@ -494,8 +496,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, u64 src_offset, dst_offset; int ret; - src_offset = old_mem->mm_node->start << PAGE_SHIFT; - dst_offset = new_mem->mm_node->start << PAGE_SHIFT; + src_offset = old_mem->start << PAGE_SHIFT; + dst_offset = new_mem->start << PAGE_SHIFT; if (!nvbo->no_vm) { if (old_mem->mem_type == TTM_PL_VRAM) src_offset += dev_priv->vm_vram_base; @@ -597,8 +599,8 @@ static int nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT; - u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT; + u32 src_offset = old_mem->start << PAGE_SHIFT; + u32 dst_offset = new_mem->start << PAGE_SHIFT; u32 page_count = new_mem->num_pages; int ret; @@ -746,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, return 0; } - offset = new_mem->mm_node->start << PAGE_SHIFT; + offset = new_mem->start << PAGE_SHIFT; if (dev_priv->card_type == NV_50) { ret = nv50_mem_vm_bind_linear(dev, @@ -860,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) case TTM_PL_TT: #if __OS_HAS_AGP if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = dev_priv->gart_info.aper_base; mem->bus.is_iomem = true; } #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; break; @@ -897,7 +899,7 @@ 
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) } /* make sure bo is in mappable vram */ - if (bo->mem.mm_node->start + bo->mem.num_pages < dev_priv->fb_mappable_pages) + if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 5eb4c966273f..373950e34814 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RO, &pushbuf, NULL); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -67,7 +67,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 22b86189b7bb..2cc59f8c658b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -113,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, return -ENOMEM; } - offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT; + offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { target = NV_DMA_TARGET_VIDMEM; } else diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 7f028fee7a58..288bacac7e5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; unsigned i, j, pte; - NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); + NV_DEBUG(dev, "pg=0x%lx\n", mem->start); - pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); + pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); nvbe->pte_start = pte; for (i = 0; i < nvbe->nr_pages; i++) { dma_addr_t dma_offset = nvbe->pages[i]; diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 1686f8291b6d..3f2fb4ec63ab 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) OUT_RING(evo, nv_crtc->lut.depth == 8 ? 
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); - OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start << - PAGE_SHIFT) >> 8); + OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); if (dev_priv->chipset != 0x50) { BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); OUT_RING(evo, NvEvoVRAM); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 11d366ad4036..55c9663ef2bf 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -345,7 +345,7 @@ nv50_display_init(struct drm_device *dev) /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), - ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) | + ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index f5800f21a9dc..a53fc974332b 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -347,7 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT; + gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 6a41d644e044..13a0f78a9088 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -50,7 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT; + gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0afd1e62347d..c26106066ec2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) out: radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, - bo->tbo.mem.mm_node->start << PAGE_SHIFT, + bo->tbo.mem.start << PAGE_SHIFT, bo->tbo.num_pages << PAGE_SHIFT); return 0; } @@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rdev = rbo->rdev; if (bo->mem.mem_type == TTM_PL_VRAM) { size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.mm_node->start << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; if ((offset + size) > rdev->mc.visible_vram_size) { /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); @@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) r = ttm_bo_validate(bo, &rbo->placement, false, true, false); if (unlikely(r != 0)) return r; - offset = bo->mem.mm_node->start << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; /* this should not happen */ if ((offset + size) > rdev->mc.visible_vram_size) return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index cc19aba9bb74..0921910698d4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: + man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; @@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ + man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; @@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - old_start = old_mem->mm_node->start << PAGE_SHIFT; - new_start = new_mem->mm_node->start << PAGE_SHIFT; + old_start = old_mem->start << PAGE_SHIFT; + new_start = new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: @@ -435,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; } #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) return -EINVAL; @@ -685,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, int r; gtt = container_of(backend, struct radeon_ttm_backend, backend); - gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT; + gtt->offset = bo_mem->start << PAGE_SHIFT; if (!gtt->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); } @@ -784,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) radeon_mem_types_list[i].show = &radeon_mm_dump_table; radeon_mem_types_list[i].driver_features = 0; if (i == 0) - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager; + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; else - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b256d4adfafe..f3cf6f02c997 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o + 
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ + ttm_bo_manager.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 4bf69c404491..f999e36f30b4 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) { struct ttm_agp_backend *agp_be = container_of(backend, struct ttm_agp_backend, backend); + struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem = agp_be->mem; int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); int ret; @@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) mem->is_flushed = 1; mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; - ret = agp_bind_memory(mem, bo_mem->mm_node->start); + ret = agp_bind_memory(mem, node->start); if (ret) printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 80d37b460a8c..af7b57a47fbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) man->available_caching); printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", man->default_caching); - if (mem_type != TTM_PL_SYSTEM) { - spin_lock(&bdev->glob->lru_lock); - drm_mm_debug_table(&man->manager, TTM_PFX); - spin_unlock(&bdev->glob->lru_lock); - } + if (mem_type != TTM_PL_SYSTEM) + (*man->func->debug)(man, TTM_PFX); } static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, @@ -421,7 +418,7 @@ moved: if (bo->mem.mm_node) { spin_lock(&bo->lock); - bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; spin_unlock(&bo->lock); @@ -724,52 +721,12 @@ retry: return ret; } -static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, - struct drm_mm_node **node) -{ - struct ttm_bo_global *glob = bo->glob; - unsigned long lpfn; - int ret; - - lpfn = placement->lpfn; - if (!lpfn) - lpfn = man->size; - *node = NULL; - do { - ret = drm_mm_pre_get(&man->manager); - if (unlikely(ret)) - return ret; - - spin_lock(&glob->lru_lock); - *node = drm_mm_search_free_in_range(&man->manager, - mem->num_pages, mem->page_alignment, - placement->fpfn, lpfn, 1); - if (unlikely(*node == NULL)) { - spin_unlock(&glob->lru_lock); - return 0; - } - *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, - mem->page_alignment, - placement->fpfn, - lpfn); - spin_unlock(&glob->lru_lock); - } while (*node == NULL); - return 0; -} - void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_bo_global *glob = bo->glob; + struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; - if (mem->mm_node) { - spin_lock(&glob->lru_lock); - drm_mm_put_block(mem->mm_node); - spin_unlock(&glob->lru_lock); - mem->mm_node = NULL; - } + if (mem->mm_node) + (*man->func->put_node)(man, mem); } EXPORT_SYMBOL(ttm_bo_mem_put); @@ -788,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct drm_mm_node *node; int ret; do { - ret = 
ttm_bo_man_get_node(bo, man, placement, mem, &node); + ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret != 0)) return ret; - if (node) + if (mem->mm_node) break; spin_lock(&glob->lru_lock); if (list_empty(&man->lru)) { @@ -808,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, if (unlikely(ret != 0)) return ret; } while (1); - if (node == NULL) + if (mem->mm_node == NULL) return -ENOMEM; - mem->mm_node = node; mem->mem_type = mem_type; return 0; } @@ -884,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool type_found = false; bool type_ok = false; bool has_erestartsys = false; - struct drm_mm_node *node = NULL; int i, ret; mem->mm_node = NULL; @@ -918,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = ttm_bo_man_get_node(bo, man, placement, mem, - &node); + ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret)) return ret; } - if (node) + if (mem->mm_node) break; } - if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { - mem->mm_node = node; + if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { mem->mem_type = mem_type; mem->placement = cur_flags; return 0; @@ -998,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { - struct ttm_bo_global *glob = bo->glob; int ret = 0; struct ttm_mem_reg mem; @@ -1026,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, goto out_unlock; ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); out_unlock: - if (ret && mem.mm_node) { - spin_lock(&glob->lru_lock); - drm_mm_put_block(mem.mm_node); - spin_unlock(&glob->lru_lock); - } + if (ret && mem.mm_node) + ttm_bo_mem_put(bo, &mem); return ret; } @@ -1038,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; - struct drm_mm_node *node = mem->mm_node; - if (node && placement->lpfn != 0 && - (node->start < placement->fpfn || - node->start + node->size > placement->lpfn)) + if (mem->mm_node && placement->lpfn != 0 && + (mem->start < placement->fpfn || + mem->start + mem->num_pages > placement->lpfn)) return -1; for (i = 0; i < placement->num_placement; i++) { @@ -1286,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man; int ret = -EINVAL; @@ -1309,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) if (mem_type > 0) { ttm_bo_force_list_clean(bdev, mem_type, false); - spin_lock(&glob->lru_lock); - if (drm_mm_clean(&man->manager)) - drm_mm_takedown(&man->manager); - else - ret = -EBUSY; - - spin_unlock(&glob->lru_lock); + ret = (*man->func->takedown)(man); } return ret; @@ -1366,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) return ret; + man->bdev = bdev; ret = 0; if (type != TTM_PL_SYSTEM) { @@ -1375,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, type); return ret; } - ret = drm_mm_init(&man->manager, 0, p_size); + + ret = (*man->func->init)(man, p_size); if (ret) return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c new file mode 100644 index 000000000000..7410c190c891 --- /dev/null +++ 
b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -0,0 +1,148 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "ttm/ttm_module.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement.h" +#include +#include +#include +#include +#include +#include + +static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct ttm_bo_global *glob = man->bdev->glob; + struct drm_mm *mm = man->priv; + struct drm_mm_node *node = NULL; + unsigned long lpfn; + int ret; + + lpfn = placement->lpfn; + if (!lpfn) + lpfn = man->size; + do { + ret = drm_mm_pre_get(mm); + if (unlikely(ret)) + return ret; + + spin_lock(&glob->lru_lock); + node = drm_mm_search_free_in_range(mm, + mem->num_pages, mem->page_alignment, + placement->fpfn, lpfn, 1); + if (unlikely(node == NULL)) { + spin_unlock(&glob->lru_lock); + return 0; + } + node = drm_mm_get_block_atomic_range(node, mem->num_pages, + mem->page_alignment, + placement->fpfn, + lpfn); + spin_unlock(&glob->lru_lock); + } while (node == NULL); + + mem->mm_node = node; + mem->start = node->start; + return 0; +} + +static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct ttm_bo_global *glob = man->bdev->glob; + + if (mem->mm_node) { + spin_lock(&glob->lru_lock); + drm_mm_put_block(mem->mm_node); + spin_unlock(&glob->lru_lock); + mem->mm_node = NULL; + } +} + +static int ttm_bo_man_init(struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + struct drm_mm *mm; + int ret; + + mm = kzalloc(sizeof(*mm), GFP_KERNEL); + if (!mm) + return -ENOMEM; + + ret = drm_mm_init(mm, 0, p_size); + if (ret) { + kfree(mm); + return ret; + } + + man->priv = mm; + return 0; +} + +static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) +{ + struct ttm_bo_global *glob = man->bdev->glob; + struct drm_mm *mm = man->priv; + int ret = 0; + + spin_lock(&glob->lru_lock); + if (drm_mm_clean(mm)) { + drm_mm_takedown(mm); + kfree(mm); + man->priv = NULL; + } else + ret = -EBUSY; + spin_unlock(&glob->lru_lock); + return ret; +} + +static void 
ttm_bo_man_debug(struct ttm_mem_type_manager *man, + const char *prefix) +{ + struct ttm_bo_global *glob = man->bdev->glob; + struct drm_mm *mm = man->priv; + + spin_lock(&glob->lru_lock); + drm_mm_debug_table(mm, prefix); + spin_unlock(&glob->lru_lock); +} + +const struct ttm_mem_type_manager_func ttm_bo_manager_func = { + ttm_bo_man_init, + ttm_bo_man_takedown, + ttm_bo_man_get_node, + ttm_bo_man_put_node, + ttm_bo_man_debug +}; +EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 0ebfe0d94931..c9d2d4d8d066 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -256,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, dir = 1; if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->mm_node->start < - old_mem->mm_node->start + old_mem->mm_node->size)) { + (new_mem->start < old_mem->start + old_mem->size)) { dir = -1; add = new_mem->num_pages - 1; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index c4f5114aee7c..1b3bd8c6c67e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -147,6 +147,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ + man->func = &ttm_bo_manager_func; man->gpu_offset = 0; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; @@ -203,7 +204,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg /* System memory */ return 0; case TTM_PL_VRAM: - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = dev_priv->vram_start; mem->bus.is_iomem = true; break; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 267a86c74e2e..49b43c23636a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -102,7 +102,8 @@ struct ttm_bus_placement { */ struct ttm_mem_reg { - struct drm_mm_node *mm_node; + void *mm_node; + unsigned long start; unsigned long size; unsigned long num_pages; uint32_t page_alignment; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 6c694d86e03d..e3371dbe6a10 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -203,7 +203,22 @@ struct ttm_tt { * It's set up by the ttm_bo_driver::init_mem_type method. */ +struct ttm_mem_type_manager; + +struct ttm_mem_type_manager_func { + int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); + int (*takedown)(struct ttm_mem_type_manager *man); + int (*get_node)(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem); + void (*put_node)(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem); + void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); +}; + struct ttm_mem_type_manager { + struct ttm_bo_device *bdev; /* * No protection. Constant from start. @@ -222,8 +237,8 @@ struct ttm_mem_type_manager { * TODO: Consider one lru_lock per ttm_mem_type_manager. * Plays ill with list removal, though. 
*/ - - struct drm_mm manager; + const struct ttm_mem_type_manager_func *func; + void *priv; struct list_head lru; }; @@ -895,6 +910,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); +extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; + #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) #define TTM_HAS_AGP #include From d3216a0c3133d8e88ec45c7ecd9f38b421f90c03 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:42:59 +0200 Subject: [PATCH 365/476] drm/vmwgfx: Really support other depths than 32 Also add some sanity checks. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 106 ++++++++++++++++++++++------ 1 file changed, 85 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e882ba099f0c..f30223cafadb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -471,16 +471,55 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { .create_handle = vmw_framebuffer_create_handle, }; -int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, - struct vmw_surface *surface, - struct vmw_framebuffer **out, - unsigned width, unsigned height) +static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, + struct vmw_surface *surface, + struct vmw_framebuffer **out, + const struct drm_mode_fb_cmd + *mode_cmd) { struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_surface *vfbs; + enum SVGA3dSurfaceFormat format; int ret; + /* + * Sanity checks. + */ + + if (unlikely(surface->mip_levels[0] != 1 || + surface->num_sizes != 1 || + surface->sizes[0].width < mode_cmd->width || + surface->sizes[0].height < mode_cmd->height || + surface->sizes[0].depth != 1)) { + DRM_ERROR("Incompatible surface dimensions " + "for requested mode.\n"); + return -EINVAL; + } + + switch (mode_cmd->depth) { + case 32: + format = SVGA3D_A8R8G8B8; + break; + case 24: + format = SVGA3D_X8R8G8B8; + break; + case 16: + format = SVGA3D_R5G6B5; + break; + case 15: + format = SVGA3D_A1R5G5B5; + break; + default: + DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); + return -EINVAL; + } + + if (unlikely(format != surface->format)) { + DRM_ERROR("Invalid surface format for requested mode.\n"); + return -EINVAL; + } + vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; @@ -498,11 +537,11 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, } /* XXX get the first 3 from the surface info */ - vfbs->base.base.bits_per_pixel = 32; - vfbs->base.base.pitch = width * 32 / 4; - vfbs->base.base.depth = 24; - vfbs->base.base.width = width; - vfbs->base.base.height = height; + vfbs->base.base.bits_per_pixel = mode_cmd->bpp; + vfbs->base.base.pitch = mode_cmd->pitch; + vfbs->base.base.depth = mode_cmd->depth; + vfbs->base.base.width = mode_cmd->width; + vfbs->base.base.height = mode_cmd->height; vfbs->base.pin = &vmw_surface_dmabuf_pin; vfbs->base.unpin = &vmw_surface_dmabuf_unpin; vfbs->surface = surface; @@ -659,16 +698,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); } -int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, - struct vmw_framebuffer **out, - unsigned width, unsigned height) +static int 
vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, + struct vmw_dma_buffer *dmabuf, + struct vmw_framebuffer **out, + const struct drm_mode_fb_cmd + *mode_cmd) { struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_dmabuf *vfbd; + unsigned int requested_size; int ret; + requested_size = mode_cmd->height * mode_cmd->pitch; + if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { + DRM_ERROR("Screen buffer object size is too small " + "for requested mode.\n"); + return -EINVAL; + } + vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); if (!vfbd) { ret = -ENOMEM; @@ -685,12 +733,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, goto out_err3; } - /* XXX get the first 3 from the surface info */ - vfbd->base.base.bits_per_pixel = 32; - vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; - vfbd->base.base.depth = 24; - vfbd->base.base.width = width; - vfbd->base.base.height = height; + vfbd->base.base.bits_per_pixel = mode_cmd->bpp; + vfbd->base.base.pitch = mode_cmd->pitch; + vfbd->base.base.depth = mode_cmd->depth; + vfbd->base.base.width = mode_cmd->width; + vfbd->base.base.height = mode_cmd->height; vfbd->base.pin = vmw_framebuffer_dmabuf_pin; vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; vfbd->buffer = dmabuf; @@ -719,8 +766,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; + unsigned int required_size; int ret; + /** + * This code should be conditioned on Screen Objects not being used. + * If screen objects are used, we can allocate a GMR to hold the + * requested framebuffer. + */ + + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > dev_priv->vram_size)) { + DRM_ERROR("VRAM size is too small for requested mode.\n"); + return NULL; + } + + /** + * End conditioned code. + */ + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, mode_cmd->handle, &surface); if (ret) @@ -730,7 +794,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, goto err_not_scanout; ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, - mode_cmd->width, mode_cmd->height); + mode_cmd); /* vmw_user_surface_lookup takes one ref so does new_fb */ vmw_surface_unreference(&surface); @@ -751,7 +815,7 @@ try_dmabuf: } ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, - mode_cmd->width, mode_cmd->height); + mode_cmd); /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ vmw_dmabuf_unreference(&bo); From 094e0fa8b96c9fab5df9597e728d82f3d87ee471 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:00 +0200 Subject: [PATCH 366/476] drm/vmwgfx: Fix ACPI S3 & S4 functionality. Don't suspend or hibernate when there are 3D resources active since we can't restore the device's 3D state. Instead fail with an error message. In other cases, make sure we re-enable the fifo and unlock ttm on resume. 
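Condensed to its core, the suspend-side check added here reduces to the sketch below. The names come straight from the vmwgfx code in this patch, but the PM-notifier half (dropping and re-taking the 3D reference held for fbdev) and the error paths are trimmed, so read it as an outline rather than the literal diff:

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /* Active 3D state cannot be restored after a power cycle: refuse. */
        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

On the resume side, the PM_POST_* notifier cases re-take the fbdev 3D reference and clear dev_priv->suspended before unlocking ttm, which is what brings the fifo back up.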
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 38 +++++++++++++++++++++++++++-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 + 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 5c845b6ec492..91eeade92124 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -751,15 +751,40 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * Buffer contents is moved to swappable memory. */ ttm_bo_swapout_all(&dev_priv->bdev); + + /** + * Release 3d reference held by fbdev and potentially + * stop fifo. + */ + dev_priv->suspended = true; + if (dev_priv->enable_fb) + vmw_3d_resource_dec(dev_priv); + break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: + case PM_POST_RESTORE: + if (!dev_priv->suspended) { + printk(KERN_WARNING + "[%s] Driver is not suspended at resume" + " point.\n", VMWGFX_DRIVER_NAME); + + break; + } + + /** + * Reclaim 3d reference held by fbdev and potentially + * start fifo. + */ + if (dev_priv->enable_fb) + vmw_3d_resource_inc(dev_priv); + + dev_priv->suspended = false; ttm_suspend_unlock(&vmaster->lock); + break; case PM_RESTORE_PREPARE: break; - case PM_POST_RESTORE: - break; default: break; } @@ -772,6 +797,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + if (dev_priv->num_3d_resources != 0) { + DRM_INFO("Can't suspend or hibernate " + "while 3D resources are active.\n"); + return -EBUSY; + } + pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 58de6393f611..132cc248d229 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -286,6 +286,7 @@ struct vmw_private { struct vmw_master *active_master; struct vmw_master fbdev_master; struct notifier_block pm_nb; + bool suspended; struct mutex release_mutex; uint32_t num_3d_resources; From 7fbd721ad35f8ffec8d9a82e0e4b390cb6c9f4f7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:01 +0200 Subject: [PATCH 367/476] drm/vmwgfx: Add new-style PM hooks to improve hibernation behavior Add the new-style PM hooks prepare and complete. This allows us to power up the device again after the hibernation image has been created, and display output will thus be active until the VM is finally powered off. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 113 +++++++++++++++++++--------- 1 file changed, 79 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 91eeade92124..f3e481f9aa86 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -752,34 +752,10 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, */ ttm_bo_swapout_all(&dev_priv->bdev); - /** - * Release 3d reference held by fbdev and potentially - * stop fifo. 
- */ - dev_priv->suspended = true; - if (dev_priv->enable_fb) - vmw_3d_resource_dec(dev_priv); - break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: case PM_POST_RESTORE: - if (!dev_priv->suspended) { - printk(KERN_WARNING - "[%s] Driver is not suspended at resume" - " point.\n", VMWGFX_DRIVER_NAME); - - break; - } - - /** - * Reclaim 3d reference held by fbdev and potentially - * start fifo. - */ - if (dev_priv->enable_fb) - vmw_3d_resource_inc(dev_priv); - - dev_priv->suspended = false; ttm_suspend_unlock(&vmaster->lock); break; @@ -795,7 +771,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * These might not be needed with the virtual SVGA device. */ -int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); @@ -812,13 +788,81 @@ int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -int vmw_pci_resume(struct pci_dev *pdev) +static int vmw_pci_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return pci_enable_device(pdev); } +static int vmw_pm_suspend(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct pm_message dummy; + + dummy.event = 0; + + return vmw_pci_suspend(pdev, dummy); +} + +static int vmw_pm_resume(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + + return vmw_pci_resume(pdev); +} + +static int vmw_pm_prepare(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + /** + * Release 3d reference held by fbdev and potentially + * stop fifo. + */ + dev_priv->suspended = true; + if (dev_priv->enable_fb) + vmw_3d_resource_dec(dev_priv); + + if (dev_priv->num_3d_resources != 0) { + + DRM_INFO("Can't suspend or hibernate " + "while 3D resources are active.\n"); + + if (dev_priv->enable_fb) + vmw_3d_resource_inc(dev_priv); + dev_priv->suspended = false; + return -EBUSY; + } + + return 0; +} + +static void vmw_pm_complete(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + /** + * Reclaim 3d reference held by fbdev and potentially + * start fifo. 
+ */ + if (dev_priv->enable_fb) + vmw_3d_resource_inc(dev_priv); + + dev_priv->suspended = false; +} + +static const struct dev_pm_ops vmw_pm_ops = { + .prepare = vmw_pm_prepare, + .complete = vmw_pm_complete, + .suspend = vmw_pm_suspend, + .resume = vmw_pm_resume, +}; + static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, @@ -852,15 +896,16 @@ static struct drm_driver driver = { #if defined(CONFIG_COMPAT) .compat_ioctl = drm_compat_ioctl, #endif - }, + }, .pci_driver = { - .name = VMWGFX_DRIVER_NAME, - .id_table = vmw_pci_id_list, - .probe = vmw_probe, - .remove = vmw_remove, - .suspend = vmw_pci_suspend, - .resume = vmw_pci_resume - }, + .name = VMWGFX_DRIVER_NAME, + .id_table = vmw_pci_id_list, + .probe = vmw_probe, + .remove = vmw_remove, + .driver = { + .pm = &vmw_pm_ops + } + }, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, .date = VMWGFX_DRIVER_DATE, From 02b001624f0384540299d9288fdaf37b7d37c814 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:02 +0200 Subject: [PATCH 368/476] drm: vmwgfx: Add a struct drm_file parameter to the dirty framebuffer callback This is needed for the callback to identify the caller and take appropriate locks if needed. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 3 ++- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 ++ include/drm/drm_crtc.h | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 37e0b4fa482a..6985cb1da72c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, } if (fb->funcs->dirty) { - ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); + ret = fb->funcs->dirty(fb, file_priv, flags, r->color, + clips, num_clips); } else { ret = -ENOSYS; goto out_err2; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f30223cafadb..073b3e1c9cc9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -392,6 +392,7 @@ out_unlock: int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) @@ -583,6 +584,7 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) } int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 3e5a51af757c..15c4796fd467 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -221,7 +221,8 @@ struct drm_framebuffer_funcs { * the semantics and arguments have a one to one mapping * on this function. */ - int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags, + int (*dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips); }; From 3a939a5ece3030e60c966a885c8e9bd329c4faf7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:03 +0200 Subject: [PATCH 369/476] drm/vmwgfx: Take the ttm lock around the dirty ioctl This makes sure noone accesses the fifo while it's taken down using the dirty ioctl. Also make sure all workqueues are idled before the fifo is taken down. 
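The locking pattern this adds to both dirty callbacks is small enough to show on its own. Below is a minimal sketch using the callback signature introduced in the previous patch; the function name is made up for illustration and the actual fifo command emission is elided:

static int vmw_fb_dirty_sketch(struct drm_framebuffer *framebuffer,
                               struct drm_file *file_priv,
                               unsigned flags, unsigned color,
                               struct drm_clip_rect *clips,
                               unsigned num_clips)
{
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /* Hold the per-master ttm lock in read mode across the fifo access. */
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        /* ... reserve fifo space, emit the update/present commands, commit ... */

        ttm_read_unlock(&vmaster->lock);
        return 0;
}

Flushing the per-master dirty workqueues while the same lock is held in exclusive mode (vmw_kms_idle_workqueues() in the diff) is what guarantees the deferred present worker can never touch the fifo after it has been taken down.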
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3 ++ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 84 ++++++++++++++++++++++++++--- 3 files changed, 84 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f3e481f9aa86..201c34d1f3ee 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -597,6 +597,8 @@ static void vmw_lastclose(struct drm_device *dev) static void vmw_master_init(struct vmw_master *vmaster) { ttm_lock_init(&vmaster->lock); + INIT_LIST_HEAD(&vmaster->fb_surf); + mutex_init(&vmaster->fb_surf_mutex); } static int vmw_master_create(struct drm_device *dev, @@ -608,7 +610,7 @@ static int vmw_master_create(struct drm_device *dev, if (unlikely(vmaster == NULL)) return -ENOMEM; - ttm_lock_init(&vmaster->lock); + vmw_master_init(vmaster); ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); master->driver_priv = vmaster; @@ -699,6 +701,7 @@ static void vmw_master_drop(struct drm_device *dev, vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); + vmw_kms_idle_workqueues(vmaster); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 132cc248d229..0ab53d98310e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -151,6 +151,8 @@ struct vmw_overlay; struct vmw_master { struct ttm_lock lock; + struct mutex fb_surf_mutex; + struct list_head fb_surf; }; struct vmw_vga_topology_state { @@ -519,6 +521,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned bbp, unsigned depth); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +void vmw_kms_idle_workqueues(struct vmw_master *vmaster); u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 073b3e1c9cc9..82bd3d8c0e4f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -332,18 +332,55 @@ struct vmw_framebuffer_surface { struct delayed_work d_work; struct mutex work_lock; bool present_fs; + struct list_head head; + struct drm_master *master; }; +/** + * vmw_kms_idle_workqueues - Flush workqueues on this master + * + * @vmaster - Pointer identifying the master, for the surfaces of which + * we idle the dirty work queues. + * + * This function should be called with the ttm lock held in exclusive mode + * to idle all dirty work queues before the fifo is taken down. + * + * The work task may actually requeue itself, but after the flush returns we're + * sure that there's nothing to present, since the ttm lock is held in + * exclusive mode, so the fifo will never get used. 
+ */ + +void vmw_kms_idle_workqueues(struct vmw_master *vmaster) +{ + struct vmw_framebuffer_surface *entry; + + mutex_lock(&vmaster->fb_surf_mutex); + list_for_each_entry(entry, &vmaster->fb_surf, head) { + if (cancel_delayed_work_sync(&entry->d_work)) + (void) entry->d_work.work.func(&entry->d_work.work); + + (void) cancel_delayed_work_sync(&entry->d_work); + } + mutex_unlock(&vmaster->fb_surf_mutex); +} + void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { - struct vmw_framebuffer_surface *vfb = + struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_master *vmaster = vmw_master(vfbs->master); - cancel_delayed_work_sync(&vfb->d_work); + + mutex_lock(&vmaster->fb_surf_mutex); + list_del(&vfbs->head); + mutex_unlock(&vmaster->fb_surf_mutex); + + cancel_delayed_work_sync(&vfbs->d_work); + drm_master_put(&vfbs->master); drm_framebuffer_cleanup(framebuffer); - vmw_surface_unreference(&vfb->surface); + vmw_surface_unreference(&vfbs->surface); - kfree(framebuffer); + kfree(vfbs); } static void vmw_framebuffer_present_fs_callback(struct work_struct *work) @@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work) SVGA3dCopyRect cr; } *cmd; + /** + * Strictly we should take the ttm_lock in read mode before accessing + * the fifo, to make sure the fifo is present and up. However, + * instead we flush all workqueues under the ttm lock in exclusive mode + * before taking down the fifo. + */ mutex_lock(&vfbs->work_lock); if (!vfbs->present_fs) goto out_unlock; @@ -398,12 +441,14 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, unsigned num_clips) { struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); struct vmw_surface *surf = vfbs->surface; struct drm_clip_rect norect; SVGA3dCopyRect *cr; int i, inc = 1; + int ret; struct { SVGA3dCmdHeader header; @@ -411,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, SVGA3dCopyRect cr; } *cmd; + if (unlikely(vfbs->master != file_priv->master)) + return -EINVAL; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + if (!num_clips || !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT)) { @@ -426,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, */ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); } + ttm_read_unlock(&vmaster->lock); return 0; } @@ -443,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); if (unlikely(cmd == NULL)) { DRM_ERROR("Fifo reserve failed.\n"); + ttm_read_unlock(&vmaster->lock); return -ENOMEM; } @@ -462,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, } vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); - + ttm_read_unlock(&vmaster->lock); return 0; } @@ -473,6 +527,7 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { }; static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, + struct drm_file *file_priv, struct vmw_surface *surface, struct vmw_framebuffer **out, const struct drm_mode_fb_cmd @@ -482,6 +537,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_surface 
*vfbs; enum SVGA3dSurfaceFormat format; + struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; /* @@ -546,8 +602,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, vfbs->base.pin = &vmw_surface_dmabuf_pin; vfbs->base.unpin = &vmw_surface_dmabuf_unpin; vfbs->surface = surface; + vfbs->master = drm_master_get(file_priv->master); mutex_init(&vfbs->work_lock); + + mutex_lock(&vmaster->fb_surf_mutex); INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); + list_add_tail(&vfbs->head, &vmaster->fb_surf); + mutex_unlock(&vmaster->fb_surf_mutex); + *out = &vfbs->base; return 0; @@ -590,13 +652,19 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, unsigned num_clips) { struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); struct drm_clip_rect norect; + int ret; struct { uint32_t header; SVGAFifoCmdUpdate body; } *cmd; int i, increment = 1; + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + if (!num_clips) { num_clips = 1; clips = &norect; @@ -611,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); if (unlikely(cmd == NULL)) { DRM_ERROR("Fifo reserve failed.\n"); + ttm_read_unlock(&vmaster->lock); return -ENOMEM; } @@ -623,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, } vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); + ttm_read_unlock(&vmaster->lock); return 0; } @@ -795,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, if (!surface->scanout) goto err_not_scanout; - ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, - mode_cmd); + ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface, + &vfb, mode_cmd); /* vmw_user_surface_lookup takes one ref so does new_fb */ vmw_surface_unreference(&surface); From e133e7371231e49c3e7d626e2251cb6f7c3ca1ad Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:04 +0200 Subject: [PATCH 370/476] drm/vmwgfx: Prune modes based on available VRAM size This needs to be reviewed once we support screen objects and don't rely on VRAM for the frame-buffer. Also fix some integer overflow issues pointed out by Michel Daenzer. 
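The overflow half of this is easy to miss: with 32-bit arithmetic, a sufficiently large mode wraps the pitch * height product right past a naive VRAM comparison. The toy userspace program below (not driver code; the 128 MiB VRAM figure is just an assumption for the example) shows the failure mode and why the new helper widens both operands to u64 before comparing against dev_priv->vram_size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pitch  = 32768u * 4u;  /* bytes per scanline, 32768-wide 32bpp mode */
        uint32_t height = 32768u;
        uint32_t vram   = 128u << 20;   /* assumed VRAM size: 128 MiB */

        uint32_t narrow = pitch * height;                     /* 2^32 wraps to 0 */
        uint64_t wide   = (uint64_t)pitch * (uint64_t)height; /* 4 GiB */

        printf("32-bit product: %u, \"fits in VRAM\": %d\n",
               (unsigned)narrow, narrow < vram);
        printf("64-bit product: %llu, fits in VRAM: %d\n",
               (unsigned long long)wide, wide < (uint64_t)vram);
        return 0;
}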
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3 +++ drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 7 +++++++ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11 +++++++++-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 28 +++++++++++++++++++--------- 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 0ab53d98310e..a10d0ad31036 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -522,6 +522,9 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void vmw_kms_idle_workqueues(struct vmw_master *vmaster); +bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, + uint32_t pitch, + uint32_t height); u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 409e172f4abf..086ef0490263 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, return -EINVAL; } + if (!vmw_kms_validate_mode_vram(vmw_priv, + info->fix.line_length, + var->yoffset + var->yres)) { + DRM_ERROR("Requested geom can not fit in framebuffer\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 82bd3d8c0e4f..5fb68f3981a2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -838,7 +838,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; - unsigned int required_size; + u64 required_size; int ret; /** @@ -848,7 +848,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, */ required_size = mode_cmd->pitch * mode_cmd->height; - if (unlikely(required_size > dev_priv->vram_size)) { + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return NULL; } @@ -1133,6 +1133,13 @@ out_unlock: return ret; } +bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, + uint32_t pitch, + uint32_t height) +{ + return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; +} + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) { return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 11cb39e3accb..a01c47ddb5bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, { struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); struct drm_display_mode *mode = NULL; + struct drm_display_mode *bmode; struct drm_display_mode prefmode = { DRM_MODE("preferred", DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, mode->hdisplay = ldu->pref_width; mode->vdisplay = ldu->pref_height; mode->vrefresh = drm_mode_vrefresh(mode); - drm_mode_probed_add(connector, mode); + if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, + mode->vdisplay)) { + 
drm_mode_probed_add(connector, mode); - if (ldu->pref_mode) { - list_del_init(&ldu->pref_mode->head); - drm_mode_destroy(dev, ldu->pref_mode); + if (ldu->pref_mode) { + list_del_init(&ldu->pref_mode->head); + drm_mode_destroy(dev, ldu->pref_mode); + } + + ldu->pref_mode = mode; } - - ldu->pref_mode = mode; } for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { - if (vmw_ldu_connector_builtin[i].hdisplay > max_width || - vmw_ldu_connector_builtin[i].vdisplay > max_height) + bmode = &vmw_ldu_connector_builtin[i]; + if (bmode->hdisplay > max_width || + bmode->vdisplay > max_height) continue; - mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); + if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, + bmode->vdisplay)) + continue; + + mode = drm_mode_duplicate(dev, bmode); if (!mode) return 0; mode->vrefresh = drm_mode_vrefresh(mode); From 09e2601b5a7bb817fb07df6f430289484a9a2cad Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:05 +0200 Subject: [PATCH 371/476] drm/vmwgfx: Don't flush fb if we're in the suspended state. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 086ef0490263..b27a9f2887d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -212,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par) SVGAFifoCmdUpdate body; } *cmd; + if (vmw_priv->suspended) + return; + spin_lock_irqsave(&par->dirty.lock, flags); if (!par->dirty.active) { spin_unlock_irqrestore(&par->dirty.lock, flags); From 30f47fc85d524d25d63da9e6d77e55ab99c6cc4a Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:06 +0200 Subject: [PATCH 372/476] drm/vmwgfx: Add a parameter to get the max fb size This can be used by the X server to restrict mode resolutions and size of root pixmap. Bump minor to announce this availability. Bump driver date. 
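On the userspace side, picking the value up is a single getparam call. A hypothetical probe is sketched below; it assumes libdrm's drmCommandWriteRead() helper plus the DRM_VMW_GET_PARAM and struct drm_vmw_getparam_arg definitions from vmwgfx_drm.h, with fd being an already-open DRM file descriptor for the vmwgfx device:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Hypothetical helper: returns 0 and fills *size on success. Kernels that
 * predate DRM_VMW_PARAM_MAX_FB_SIZE reject unknown parameters, so callers
 * need a fallback (e.g. restricting modes to the probed VRAM size). */
static int vmw_query_max_fb_size(int fd, uint64_t *size)
{
        struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_MAX_FB_SIZE };
        int ret;

        ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
        if (ret)
                return ret;

        *size = arg.value;
        return 0;
}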
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3 +++ include/drm/vmwgfx_drm.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index a10d0ad31036..217ba1fe5dd7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,9 +39,9 @@ #include "ttm/ttm_execbuf_util.h" #include "ttm/ttm_module.h" -#define VMWGFX_DRIVER_DATE "20100209" +#define VMWGFX_DRIVER_DATE "20100723" #define VMWGFX_DRIVER_MAJOR 1 -#define VMWGFX_DRIVER_MINOR 2 +#define VMWGFX_DRIVER_MINOR 3 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 1c7a316454d8..570d57775a58 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_FIFO_CAPS: param->value = dev_priv->fifo.capabilities; break; + case DRM_VMW_PARAM_MAX_FB_SIZE: + param->value = dev_priv->vram_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h index 4d0842391edc..650e6bf6f69f 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/drm/vmwgfx_drm.h @@ -72,6 +72,7 @@ #define DRM_VMW_PARAM_FIFO_OFFSET 3 #define DRM_VMW_PARAM_HW_CAPS 4 #define DRM_VMW_PARAM_FIFO_CAPS 5 +#define DRM_VMW_PARAM_MAX_FB_SIZE 6 /** * struct drm_vmw_getparam_arg From 73558ead6467f0590fe58a03a16a94d2a934178b Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:07 +0200 Subject: [PATCH 373/476] drm/vmwgfx: Add modinfo version Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 201c34d1f3ee..f2942b3c59c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -942,3 +942,7 @@ module_exit(vmwgfx_exit); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); MODULE_LICENSE("GPL and additional rights"); +MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." + __stringify(VMWGFX_DRIVER_MINOR) "." + __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." + "0"); From 029e50bfc231741c533128e7e806850557f8e97f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:08 +0200 Subject: [PATCH 374/476] drm/vmwgfx: Save at least one screen layout Save at least one screen layout during vga save to avoid odd things happening during restore. 
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 5fb68f3981a2..87c6e6156d7d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1025,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) vmw_priv->num_displays = vmw_read(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS); + if (vmw_priv->num_displays == 0) + vmw_priv->num_displays = 1; + for (i = 0; i < vmw_priv->num_displays; ++i) { save = &vmw_priv->vga_save[i]; vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); From 8aea528736bf83ba0cdde67a3c0ca0250581eade Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 5 Oct 2010 12:43:09 +0200 Subject: [PATCH 375/476] drm/vmwgfx: Bump minor and driver date Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 217ba1fe5dd7..9d55fa8cd0fe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,9 +39,9 @@ #include "ttm/ttm_execbuf_util.h" #include "ttm/ttm_module.h" -#define VMWGFX_DRIVER_DATE "20100723" +#define VMWGFX_DRIVER_DATE "20100927" #define VMWGFX_DRIVER_MAJOR 1 -#define VMWGFX_DRIVER_MINOR 3 +#define VMWGFX_DRIVER_MINOR 4 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) From b70d6bb3f67b2e2964abafb0117f1733f062cba5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 6 Aug 2010 21:36:58 -0400 Subject: [PATCH 376/476] drm/radeon/kms: clean up r6xx/r7xx blit init (v2) Move common code to init function. v2: make sure the bo is pinned after init as well. 
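Condensed, the reworked init path looks like the sketch below (allocation of the shader bo and the state/shader upload are elided). The v2 point is that the pin now lives after the done: label, so it runs both on first init and when the bo already exists, e.g. on a later startup after the bo was unpinned for suspend:

int r600_blit_init(struct radeon_device *rdev)
{
        int r;

        /* Already built (e.g. resume): skip straight to (re)pinning. */
        if (rdev->r600_blit.shader_obj)
                goto done;

        /* ... allocate shader_obj, upload blit state and vs/ps shaders ... */

done:
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->r600_blit.shader_gpu_addr);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r)
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
        return r;
}

This also lets r600_startup() and rv770_startup() drop their duplicated pin-after-init blocks, as the hunks below show.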
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600.c | 14 +------------- drivers/gpu/drm/radeon/r600_blit_kms.c | 17 +++++++++++++++-- drivers/gpu/drm/radeon/rv770.c | 14 +------------- 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 7a04959ba0ee..927509ff349a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2426,19 +2426,7 @@ int r600_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* pin copy shader into vram */ - if (rdev->r600_blit.shader_obj) { - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } - } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9ceb2a1ce799..90394df63009 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev) u32 packet2s[16]; int num_packet2s = 0; - /* don't reinitialize blit */ + /* pin copy shader into vram if already initialized */ if (rdev->r600_blit.shader_obj) - return 0; + goto done; + mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; @@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev) memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); + +done: + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } return 0; } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bfa59db374d2..bc2beb7d35e9 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1028,19 +1028,7 @@ static int rv770_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* pin copy shader into vram */ - if (rdev->r600_blit.shader_obj) { - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); - return r; - } - } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { From 724c80e1d630296d1324859e964d80d35007d83c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 27 Aug 2010 18:25:25 -0400 Subject: [PATCH 377/476] drm/radeon/kms: enable writeback (v2) When writeback is enabled, the GPU shadows writes to certain registers into a buffer in memory. The driver can then read the values from the shadow rather than reading back from the register across the bus. Writeback can be disabled by setting the no_wb module param to 1. 
On r6xx/r7xx/evergreen, the following registers are shadowed: - CP scratch registers - CP read pointer - IH write pointer On r1xx-rr5xx, the following registers are shadowed: - CP scratch registers - CP read pointer v2: - Combine wb patches for r6xx-evergreen and r1xx-r5xx - Writeback is disabled on AGP boards since it tends to be unreliable on AGP using the gart. - Check radeon_wb_init return values properly. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 36 +++++--- drivers/gpu/drm/radeon/r100.c | 97 ++++++--------------- drivers/gpu/drm/radeon/r300.c | 15 ++-- drivers/gpu/drm/radeon/r420.c | 16 ++-- drivers/gpu/drm/radeon/r520.c | 11 ++- drivers/gpu/drm/radeon/r600.c | 116 ++++++++----------------- drivers/gpu/drm/radeon/radeon.h | 12 ++- drivers/gpu/drm/radeon/radeon_asic.h | 3 - drivers/gpu/drm/radeon/radeon_device.c | 77 +++++++++++++++- drivers/gpu/drm/radeon/radeon_fence.c | 6 +- drivers/gpu/drm/radeon/radeon_ring.c | 12 ++- drivers/gpu/drm/radeon/rs400.c | 15 ++-- drivers/gpu/drm/radeon/rs600.c | 15 ++-- drivers/gpu/drm/radeon/rs690.c | 15 ++-- drivers/gpu/drm/radeon/rv515.c | 15 ++-- drivers/gpu/drm/radeon/rv770.c | 15 ++-- 16 files changed, 258 insertions(+), 218 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 79082d4398ae..e47d221e24ac 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -731,7 +731,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) /* Set ring buffer size */ rb_bufsz = drm_order(rdev->cp.ring_size / 8); - tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -745,8 +745,19 @@ int evergreen_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_WPTR, 0); - WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + + /* set the wb address wether it's enabled or not */ + WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); + WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); + + if (rdev->wb.enabled) + WREG32(SCRATCH_UMSK, 0xff); + else { + tmp |= RB_NO_UPDATE; + WREG32(SCRATCH_UMSK, 0); + } + mdelay(1); WREG32(CP_RB_CNTL, tmp); @@ -1759,8 +1770,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) { u32 wptr, tmp; - /* XXX use writeback */ - wptr = RREG32(IH_RB_WPTR); + if (rdev->wb.enabled) + wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + else + wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { /* When a ring buffer overflow happen start parsing interrupt @@ -2068,6 +2081,11 @@ static int evergreen_startup(struct radeon_device *rdev) } #endif + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -2086,8 +2104,6 @@ static int evergreen_startup(struct radeon_device *rdev) r = evergreen_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); return 0; } @@ -2128,7 +2144,7 @@ int evergreen_suspend(struct radeon_device *rdev) r700_cp_stop(rdev); rdev->cp.ready = false; evergreen_irq_suspend(rdev); - r600_wb_disable(rdev); + 
radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); #if 0 /* unpin shaders bo */ @@ -2245,8 +2261,8 @@ int evergreen_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -2270,8 +2286,8 @@ void evergreen_fini(struct radeon_device *rdev) { /*r600_blit_fini(rdev);*/ r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e151f16a8f86..7712c055b3e8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } -int r100_wb_init(struct radeon_device *rdev) -{ - int r; - - if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, - &rdev->wb.wb_obj); - if (r) { - dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); - return r; - } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); - radeon_bo_unreserve(rdev->wb.wb_obj); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); - radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); - return r; - } - } - WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); - WREG32(R_00070C_CP_RB_RPTR_ADDR, - S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); - WREG32(R_000770_SCRATCH_UMSK, 0xff); - return 0; -} - -void r100_wb_disable(struct radeon_device *rdev) -{ - WREG32(R_000770_SCRATCH_UMSK, 0); -} - -void r100_wb_fini(struct radeon_device *rdev) -{ - int r; - - r100_wb_disable(rdev); - if (rdev->wb.wb_obj) { - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - dev_err(rdev->dev, "(%d) can't finish WB\n", r); - return; - } - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - radeon_bo_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, @@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | - REG_SET(RADEON_MAX_FETCH, max_fetch) | - RADEON_RB_NO_UPDATE); + REG_SET(RADEON_MAX_FETCH, max_fetch)); #ifdef __BIG_ENDIAN tmp |= RADEON_BUF_SWAP_32BIT; #endif - WREG32(RADEON_CP_RB_CNTL, tmp); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); /* Set ring address */ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); /* Force read & write ptr to 0 */ - WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); WREG32(RADEON_CP_RB_RPTR_WR, 0); WREG32(RADEON_CP_RB_WPTR, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(R_00070C_CP_RB_RPTR_ADDR, + 
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); + WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); + + if (rdev->wb.enabled) + WREG32(R_000770_SCRATCH_UMSK, 0xff); + else { + tmp |= RADEON_RB_NO_UPDATE; + WREG32(R_000770_SCRATCH_UMSK, 0); + } + WREG32(RADEON_CP_RB_CNTL, tmp); udelay(10); rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); @@ -1050,6 +1001,7 @@ void r100_cp_disable(struct radeon_device *rdev) rdev->cp.ready = false; WREG32(RADEON_CP_CSQ_MODE, 0); WREG32(RADEON_CP_CSQ_CNTL, 0); + WREG32(R_000770_SCRATCH_UMSK, 0); if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); @@ -3734,6 +3686,12 @@ static int r100_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -3743,9 +3701,6 @@ static int r100_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -3779,7 +3734,7 @@ int r100_resume(struct radeon_device *rdev) int r100_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); @@ -3789,7 +3744,7 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCI) @@ -3902,7 +3857,7 @@ int r100_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index c827738ad7dd..34527e600fe9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev) int r300_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) @@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev) 
/* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 59f7bccc5be0..c387346f93a9 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev) return r; } r420_pipes_init(rdev); + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev) return r; } r420_cp_errata_init(rdev); - r = r100_wb_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - } r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev) { r420_cp_errata_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev) void r420_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) @@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 1458dee902dd..3c8677f9e385 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 927509ff349a..fbce58b2cd04 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1918,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) void r600_cp_stop(struct radeon_device *rdev) { WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); + WREG32(SCRATCH_UMSK, 0); } int r600_init_microcode(struct radeon_device *rdev) @@ -2150,7 +2151,7 @@ int r600_cp_resume(struct radeon_device *rdev) /* Set ring buffer size */ rb_bufsz = 
drm_order(rdev->cp.ring_size / 8); - tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -2164,8 +2165,19 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_WPTR, 0); - WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); + WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); + + if (rdev->wb.enabled) + WREG32(SCRATCH_UMSK, 0xff); + else { + tmp |= RB_NO_UPDATE; + WREG32(SCRATCH_UMSK, 0); + } + mdelay(1); WREG32(CP_RB_CNTL, tmp); @@ -2217,9 +2229,10 @@ void r600_scratch_init(struct radeon_device *rdev) int i; rdev->scratch.num_reg = 7; + rdev->scratch.reg_base = SCRATCH_REG0; for (i = 0; i < rdev->scratch.num_reg; i++) { rdev->scratch.free[i] = true; - rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); + rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); } } @@ -2263,70 +2276,6 @@ int r600_ring_test(struct radeon_device *rdev) return r; } -void r600_wb_disable(struct radeon_device *rdev) -{ - int r; - - WREG32(SCRATCH_UMSK, 0); - if (rdev->wb.wb_obj) { - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) - return; - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - } -} - -void r600_wb_fini(struct radeon_device *rdev) -{ - r600_wb_disable(rdev); - if (rdev->wb.wb_obj) { - radeon_bo_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - -int r600_wb_enable(struct radeon_device *rdev) -{ - int r; - - if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); - return r; - } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - r600_wb_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->wb.wb_obj); - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); - r600_wb_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); - radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); - r600_wb_fini(rdev); - return r; - } - } - WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF); - WREG32(SCRATCH_UMSK, 0xff); - return 0; -} - void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -2427,6 +2376,11 @@ int r600_startup(struct radeon_device *rdev) dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -2445,8 +2399,7 @@ int r600_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - 
r600_wb_enable(rdev); + return 0; } @@ -2505,7 +2458,7 @@ int r600_suspend(struct radeon_device *rdev) r600_cp_stop(rdev); rdev->cp.ready = false; r600_irq_suspend(rdev); - r600_wb_disable(rdev); + radeon_wb_disable(rdev); r600_pcie_gart_disable(rdev); /* unpin shaders bo */ if (rdev->r600_blit.shader_obj) { @@ -2602,8 +2555,8 @@ int r600_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -2633,8 +2586,8 @@ void r600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r600_blit_fini(rdev); r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); radeon_agp_fini(rdev); @@ -2969,10 +2922,13 @@ int r600_irq_init(struct radeon_device *rdev) ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | (rb_bufsz << 1)); - /* WPTR writeback, not yet */ - /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ - WREG32(IH_RB_WPTR_ADDR_LO, 0); - WREG32(IH_RB_WPTR_ADDR_HI, 0); + + if (rdev->wb.enabled) + ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; + + /* set the writeback address whether it's enabled or not */ + WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); + WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); WREG32(IH_RB_CNTL, ih_rb_cntl); @@ -3230,8 +3186,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) { u32 wptr, tmp; - /* XXX use writeback */ - wptr = RREG32(IH_RB_WPTR); + if (rdev->wb.enabled) + wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + else + wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { /* When a ring buffer overflow happen start parsing interrupt diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a168d644bf9e..4e10938d8dd1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -365,6 +365,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev); */ struct radeon_scratch { unsigned num_reg; + uint32_t reg_base; bool free[32]; uint32_t reg[32]; }; @@ -593,8 +594,13 @@ struct radeon_wb { struct radeon_bo *wb_obj; volatile uint32_t *wb; uint64_t gpu_addr; + bool enabled; }; +#define RADEON_WB_SCRATCH_OFFSET 0 +#define RADEON_WB_CP_RPTR_OFFSET 1024 +#define R600_WB_IH_WPTR_OFFSET 2048 + /** * struct radeon_pm - power management datas * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) @@ -1340,6 +1346,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev); extern void radeon_update_display_priority(struct radeon_device *rdev); extern bool radeon_boot_test_post_card(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); +extern void radeon_wb_fini(struct radeon_device *rdev); +extern int radeon_wb_init(struct radeon_device *rdev); +extern void radeon_wb_disable(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); @@ -1424,9 +1433,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); extern int r600_ib_test(struct radeon_device *rdev); extern int r600_ring_test(struct radeon_device *rdev); 
-extern void r600_wb_fini(struct radeon_device *rdev); -extern int r600_wb_enable(struct radeon_device *rdev); -extern void r600_wb_disable(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index a5aff755f0d2..6d3b055c02fd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev); void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); void r100_vram_init_sizes(struct radeon_device *rdev); -void r100_wb_disable(struct radeon_device *rdev); -void r100_wb_fini(struct radeon_device *rdev); -int r100_wb_init(struct radeon_device *rdev); int r100_cp_reset(struct radeon_device *rdev); void r100_vga_render_disable(struct radeon_device *rdev); void r100_restore_sanity(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 256d204a6d24..cfc162d05010 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev) } else { rdev->scratch.num_reg = 7; } + rdev->scratch.reg_base = RADEON_SCRATCH_REG0; for (i = 0; i < rdev->scratch.num_reg; i++) { rdev->scratch.free[i] = true; - rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); + rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); } } @@ -149,6 +150,80 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) } } +void radeon_wb_disable(struct radeon_device *rdev) +{ + int r; + + if (rdev->wb.wb_obj) { + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return; + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + } + rdev->wb.enabled = false; +} + +void radeon_wb_fini(struct radeon_device *rdev) +{ + radeon_wb_disable(rdev); + if (rdev->wb.wb_obj) { + radeon_bo_unref(&rdev->wb.wb_obj); + rdev->wb.wb = NULL; + rdev->wb.wb_obj = NULL; + } +} + +int radeon_wb_init(struct radeon_device *rdev) +{ + int r; + + if (rdev->wb.wb_obj == NULL) { + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, + RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); + return r; + } + } + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->wb.wb_obj); + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + + /* disabled via module param */ + if (radeon_no_wb == 1) + rdev->wb.enabled = false; + else { + /* often unreliable on AGP */ + if (rdev->flags & RADEON_IS_AGP) { + rdev->wb.enabled = false; + } else + rdev->wb.enabled = true; + } + + dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? 
"en" : "dis"); + + return 0; +} + /** * radeon_vram_location - try to find VRAM location * @rdev: radeon device structure holding all necessary informations diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index b1f9a81b5d1d..698a7ed3a6a1 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -72,7 +72,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) bool wake = false; unsigned long cjiffies; - seq = RREG32(rdev->fence_drv.scratch_reg); + if (rdev->wb.enabled) { + u32 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + seq = rdev->wb.wb[scratch_index/4]; + } else + seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { rdev->fence_drv.last_seq = seq; rdev->fence_drv.last_jiffies = jiffies; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 261e98a276db..6ea798ce8218 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) */ void radeon_ring_free_size(struct radeon_device *rdev) { - if (rdev->family >= CHIP_R600) - rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); - else - rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + if (rdev->wb.enabled) + rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; + else { + if (rdev->family >= CHIP_R600) + rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); + else + rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + } /* This works because ring_size is a power of 2 */ rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); rdev->cp.ring_free_dw -= rdev->cp.wptr; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ae2b76b9a388..f683e51a2a06 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev) r = rs400_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev) int rs400_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); rs400_gart_disable(rdev); return 0; @@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); @@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index cc05b230d7ef..8d8359a5d459 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ 
-795,6 +795,12 @@ static int rs600_startup(struct radeon_device *rdev) r = rs600_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -804,9 +810,6 @@ static int rs600_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -847,7 +850,7 @@ int rs600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); rs600_gart_disable(rdev); return 0; @@ -857,7 +860,7 @@ void rs600_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs600_gart_fini(rdev); @@ -931,7 +934,7 @@ int rs600_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs600_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3e3f75718be3..70ed66ef1ca8 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -615,6 +615,12 @@ static int rs690_startup(struct radeon_device *rdev) r = rs400_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -624,9 +630,6 @@ static int rs690_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -667,7 +670,7 @@ int rs690_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); rs400_gart_disable(rdev); return 0; @@ -677,7 +680,7 @@ void rs690_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); @@ -752,7 +755,7 @@ int rs690_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 4d6e86041a9f..5d569f41f4ae 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); 
return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev) int rv515_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rv370_pcie_gart_fini(rdev); @@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bc2beb7d35e9..ff1cc58920c0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -268,6 +268,7 @@ static void rv770_mc_program(struct radeon_device *rdev) void r700_cp_stop(struct radeon_device *rdev) { WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); + WREG32(SCRATCH_UMSK, 0); } static int rv770_cp_load_microcode(struct radeon_device *rdev) @@ -1029,6 +1030,11 @@ static int rv770_startup(struct radeon_device *rdev) dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -1047,8 +1053,7 @@ static int rv770_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); + return 0; } @@ -1094,7 +1099,7 @@ int rv770_suspend(struct radeon_device *rdev) r700_cp_stop(rdev); rdev->cp.ready = false; r600_irq_suspend(rdev); - r600_wb_disable(rdev); + radeon_wb_disable(rdev); rv770_pcie_gart_disable(rdev); /* unpin shaders bo */ if (rdev->r600_blit.shader_obj) { @@ -1189,8 +1194,8 @@ int rv770_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -1222,8 +1227,8 @@ void rv770_fini(struct radeon_device *rdev) { r600_blit_fini(rdev); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rv770_vram_scratch_fini(rdev); From d0f8a854c340986359a3b0a97e380c71def7a440 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 4 Sep 2010 05:04:34 -0400 Subject: [PATCH 378/476] drm/radeon/kms/r6xx+: use new style fencing (v3) On r6xx+ a newer fence mechanism was implemented to replace the old wait_until plus scratch regs setup. A single EOP event will flush the destination caches, write a fence value, and generate an interrupt. This is the recommended fence mechanism on r6xx+ asics. This requires my previous writeback patch. v2: fix typo that enabled event fence checking on all asics rather than just r6xx+. 
v3: properly enable EOP interrupts Should fix: https://bugs.freedesktop.org/show_bug.cgi?id=29972 Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 2 ++ drivers/gpu/drm/radeon/r600.c | 42 +++++++++++++++++--------- drivers/gpu/drm/radeon/r600d.h | 21 +++++++++++++ drivers/gpu/drm/radeon/radeon.h | 2 ++ drivers/gpu/drm/radeon/radeon_device.c | 8 ++++- drivers/gpu/drm/radeon/radeon_fence.c | 6 +++- 6 files changed, 64 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e47d221e24ac..4c82cc830271 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1594,6 +1594,7 @@ int evergreen_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { DRM_DEBUG("evergreen_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; + cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) { DRM_DEBUG("evergreen_irq_set: vblank 0\n"); @@ -2012,6 +2013,7 @@ restart_ih: break; case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); + radeon_fence_process(rdev); break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: CP EOP\n"); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index fbce58b2cd04..7c5f855a43e6 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2279,21 +2279,31 @@ int r600_ring_test(struct radeon_device *rdev) void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ - - radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); - radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); - /* wait for 3D idle clean */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); - radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); - /* Emit fence sequence & fire IRQ */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); - radeon_ring_write(rdev, fence->seq); - /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ - radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); - radeon_ring_write(rdev, RB_INT_STAT); + if (rdev->wb.use_event) { + u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + + (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); + /* EVENT_WRITE_EOP - flush caches, send int */ + radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); + radeon_ring_write(rdev, addr & 0xffffffff); + radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); + radeon_ring_write(rdev, fence->seq); + radeon_ring_write(rdev, 0); + } else { + radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); + radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); + /* wait for 3D idle clean */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); + radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); + /* Emit fence sequence & fire IRQ */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); + radeon_ring_write(rdev, fence->seq); + /* CP_INTERRUPT 
packet 3 no longer exists, use packet 0 */ + radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); + radeon_ring_write(rdev, RB_INT_STAT); + } } int r600_copy_blit(struct radeon_device *rdev, @@ -3012,6 +3022,7 @@ int r600_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { DRM_DEBUG("r600_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; + cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) { DRM_DEBUG("r600_irq_set: vblank 0\n"); @@ -3377,6 +3388,7 @@ restart_ih: break; case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); + radeon_fence_process(rdev); break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: CP EOP\n"); diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 858a1920c0d7..966a793e225b 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -474,6 +474,7 @@ #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VTX_REUSE_DEPTH_MASK 0x000000FF #define VGT_EVENT_INITIATOR 0x28a90 +# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) #define VM_CONTEXT0_CNTL 0x1410 @@ -775,7 +776,27 @@ #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 +#define EVENT_TYPE(x) ((x) << 0) +#define EVENT_INDEX(x) ((x) << 8) + /* 0 - any non-TS event + * 1 - ZPASS_DONE + * 2 - SAMPLE_PIPELINESTAT + * 3 - SAMPLE_STREAMOUTSTAT* + * 4 - *S_PARTIAL_FLUSH + * 5 - TS events + */ #define PACKET3_EVENT_WRITE_EOP 0x47 +#define DATA_SEL(x) ((x) << 29) + /* 0 - discard + * 1 - send low 32bit data + * 2 - send 64bit data + * 3 - send 64bit counter value + */ +#define INT_SEL(x) ((x) << 24) + /* 0 - none + * 1 - interrupt only (DATA_SEL = 0) + * 2 - interrupt when data write is confirmed + */ #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4e10938d8dd1..2bfae5679135 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -595,11 +595,13 @@ struct radeon_wb { volatile uint32_t *wb; uint64_t gpu_addr; bool enabled; + bool use_event; }; #define RADEON_WB_SCRATCH_OFFSET 0 #define RADEON_WB_CP_RPTR_OFFSET 1024 #define R600_WB_IH_WPTR_OFFSET 2048 +#define R600_WB_EVENT_OFFSET 3072 /** * struct radeon_pm - power management datas diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index cfc162d05010..8adfedfe547f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -208,6 +208,8 @@ int radeon_wb_init(struct radeon_device *rdev) return r; } + /* disable event_write fences */ + rdev->wb.use_event = false; /* disabled via module param */ if (radeon_no_wb == 1) rdev->wb.enabled = false; @@ -215,8 +217,12 @@ int radeon_wb_init(struct radeon_device *rdev) /* often unreliable on AGP */ if (rdev->flags & RADEON_IS_AGP) { rdev->wb.enabled = false; - } else + } else { rdev->wb.enabled = true; + /* event_write fences are only available on r600+ */ + if (rdev->family >= CHIP_R600) + rdev->wb.use_event = true; + } } dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? 
"en" : "dis"); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 698a7ed3a6a1..216392d0353b 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -73,7 +73,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) unsigned long cjiffies; if (rdev->wb.enabled) { - u32 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + u32 scratch_index; + if (rdev->wb.use_event) + scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + else + scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; seq = rdev->wb.wb[scratch_index/4]; } else seq = RREG32(rdev->fence_drv.scratch_reg); From 5bccf5e32f010ac4d99e1eafb8669cfb35a0889a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Gr=C3=B6ger?= Date: Tue, 21 Sep 2010 21:30:59 +0200 Subject: [PATCH 379/476] drm/radeon: add properties to configure the width of the underscan borders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows for a more exact fitting on the physical display. The new properties default to zero which corresponds to the previous underscan border width[height] formula: (display_width[display_width] >> 5) + 16. Example to set a horizontal border width of 30 and a vertikal border height of 22: xrandr --output HDMI-0 --set underscan on --set "underscan hborder" 30 --set "underscan vborder" 22 Signed-off-by: Marius Gröger Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_connectors.c | 55 ++++++++++++++++++++-- drivers/gpu/drm/radeon/radeon_display.c | 28 ++++++++++- drivers/gpu/drm/radeon/radeon_mode.h | 4 ++ 3 files changed, 82 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ecc1a8fafbfd..64c3ddf02167 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr } } + if (property == rdev->mode_info.underscan_hborder_property) { + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->underscan_hborder != val) { + radeon_encoder->underscan_hborder = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + + if (property == rdev->mode_info.underscan_vborder_property) { + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->underscan_vborder != val) { + radeon_encoder->underscan_vborder = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + if (property == rdev->mode_info.tv_std_property) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); if (!encoder) { @@ -1153,10 +1181,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + 
rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1181,10 +1216,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } subpixel_order = SubPixelHorizontalRGB; break; case DRM_MODE_CONNECTOR_DisplayPort: @@ -1212,10 +1254,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b92d2f2fcbed..902f7ce86bbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1002,6 +1002,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) radeon_underscan_enum_list[i].name); } + rdev->mode_info.underscan_hborder_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "underscan hborder", 2); + if (!rdev->mode_info.underscan_hborder_property) + return -ENOMEM; + rdev->mode_info.underscan_hborder_property->values[0] = 0; + rdev->mode_info.underscan_hborder_property->values[1] = 128; + + rdev->mode_info.underscan_vborder_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "underscan vborder", 2); + if (!rdev->mode_info.underscan_vborder_property) + return -ENOMEM; + rdev->mode_info.underscan_vborder_property->values[0] = 0; + rdev->mode_info.underscan_vborder_property->values[1] = 128; + return 0; } @@ -1159,8 +1177,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && drm_detect_hdmi_monitor(radeon_connector->edid) && is_hdtv_mode(mode)))) { - radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; - radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; + if (radeon_encoder->underscan_hborder != 0) + radeon_crtc->h_border = radeon_encoder->underscan_hborder; + else + radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; + if (radeon_encoder->underscan_vborder != 0) + radeon_crtc->v_border = radeon_encoder->underscan_vborder; + else + radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; radeon_crtc->rmx_type = RMX_FULL; src_v = crtc->mode.vdisplay; dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 17a6602b5885..8e071bf5e250 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h 
+++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -241,6 +241,8 @@ struct radeon_mode_info { struct drm_property *tmds_pll_property; /* underscan */ struct drm_property *underscan_property; + struct drm_property *underscan_hborder_property; + struct drm_property *underscan_vborder_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; @@ -370,6 +372,8 @@ struct radeon_encoder { uint32_t pixel_clock; enum radeon_rmx_type rmx_type; enum radeon_underscan_type underscan_type; + uint32_t underscan_hborder; + uint32_t underscan_vborder; struct drm_display_mode native_mode; void *enc_priv; int audio_polling_active; From f981d4633a29261587e46bbf77bf29aeb2f9666a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Sep 2010 19:16:03 -0400 Subject: [PATCH 380/476] drm/radeon/kms: properly handle 40 bit MC addresses in the cursor code Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_cursor.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5731fc9b1ae3..4a8102512db5 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc) } static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, - uint32_t gpu_addr) + uint64_t gpu_addr) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(gpu_addr)); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + gpu_addr & 0xffffffff); } else if (ASIC_IS_AVIVO(rdev)) { if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) - WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); else - WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); } - WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); + WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + gpu_addr & 0xffffffff); } else { radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ From bcac54da0a6bd2ed93a2a70fe3d4ebc08c4ed779 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 29 Sep 2010 11:37:39 -0400 Subject: [PATCH 381/476] drm/radeon/kms: prefer high post dividers in legacy pll algo the hw prefers higher post dividers Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 902f7ce86bbc..d276d6d8e2b0 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -513,7 +513,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { + for (post_div = max_post_div; post_div >= min_post_div; --post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && 
(post_div & 1)) From f28488c282d8916b9b6190cc41714815bbaf97d5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 29 Sep 2010 11:37:40 -0400 Subject: [PATCH 382/476] drm/radeon/kms: remove some pll algo flags These shouldn't be needed with the post div changes in the last patch. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 14 +---------- drivers/gpu/drm/radeon/radeon_display.c | 26 ++------------------- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 5 ---- drivers/gpu/drm/radeon/radeon_mode.h | 14 ++++------- 4 files changed, 7 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index cd0290f946cf..ca04a1bdb75b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -501,21 +501,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, (rdev->family == CHIP_RS740)) pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ RADEON_PLL_PREFER_CLOSEST_LOWER); - - if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ - pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; - else - pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - } else { + } else pll->flags |= RADEON_PLL_LEGACY; - if (mode->clock > 200000) /* range limits??? */ - pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; - else - pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - - } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d276d6d8e2b0..20464659d3fa 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -579,7 +579,8 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, if ((best_vco == 0 && error < best_error) || (best_vco != 0 && ((best_error > 100 && error < best_error - 100) || - (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { + (abs(error - best_error) < 100 && + vco_diff < best_vco_diff)))) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; @@ -587,29 +588,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; - } else if (current_freq == freq) { - if (best_freq == -1) { - best_post_div = post_div; - best_ref_div = ref_div; - best_feedback_div = feedback_div; - best_frac_feedback_div = frac_feedback_div; - best_freq = current_freq; - best_error = error; - best_vco_diff = vco_diff; - } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || - ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || - ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || - ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || - ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || - ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { - best_post_div = post_div; - best_ref_div = ref_div; - best_feedback_div = feedback_div; - best_frac_feedback_div = frac_feedback_div; - best_freq = current_freq; - best_error = error; - best_vco_diff = vco_diff; - } } if (current_freq < freq) min_frac_feed_div = frac_feedback_div + 1; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 
305049afde15..d60b31982845 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -722,11 +722,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) else pll->algo = PLL_ALGO_LEGACY; - if (mode->clock > 200000) /* range limits??? */ - pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; - else - pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8e071bf5e250..8707cd61e58b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -139,16 +139,10 @@ struct radeon_tmds_pll { #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) #define RADEON_PLL_USE_REF_DIV (1 << 2) #define RADEON_PLL_LEGACY (1 << 3) -#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) -#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5) -#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6) -#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7) -#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) -#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) -#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) -#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) -#define RADEON_PLL_USE_POST_DIV (1 << 12) -#define RADEON_PLL_IS_LCD (1 << 13) +#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 4) +#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 5) +#define RADEON_PLL_USE_POST_DIV (1 << 6) +#define RADEON_PLL_IS_LCD (1 << 7) /* pll algo */ enum radeon_pll_algo { From 48dfaaeb6637240af3089bf9b7a00a6cf24e0182 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 29 Sep 2010 11:37:41 -0400 Subject: [PATCH 383/476] drm/radeon/kms: remove new pll algo The recent changes to the old algo (prefer high post div) coupled with the range and precision limitations of using fixed point with the new algo make the new algo less useful. So drop the new algo. This should work as well or better than the old new/old combinations and simplifies the code a lot. Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=30218 among others. 
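For illustration, the preference the remaining legacy algo relies on can be pictured as a tiny standalone sketch (simplified, hypothetical VCO limits and clock values -- not the driver code): walking the post divider from the maximum down means the first divider whose VCO lands inside the allowed window is the highest usable one.

#include <stdio.h>

/* Illustrative sketch only: pick the highest post divider whose VCO
 * (target clock * post divider) falls inside the allowed VCO window.
 * All limits below are hypothetical, not taken from any asic table. */
static unsigned int pick_post_div(unsigned int target_khz,
				  unsigned int vco_min_khz, unsigned int vco_max_khz,
				  unsigned int max_post_div, unsigned int min_post_div)
{
	unsigned int post_div;

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		unsigned int vco_khz = target_khz * post_div;

		if (vco_khz >= vco_min_khz && vco_khz <= vco_max_khz)
			return post_div;
	}
	return 0; /* no valid post divider found */
}

int main(void)
{
	/* hypothetical numbers: 108 MHz target clock, VCO limited to 1.2-2.4 GHz */
	printf("post_div = %u\n",
	       pick_post_div(108000, 1200000, 2400000, 12, 2));
	return 0;
}

With these made-up limits the search settles on the largest in-range divider (12 here), which is the behaviour the reverted iteration order in the legacy algo is meant to guarantee.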
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 32 +-- drivers/gpu/drm/radeon/radeon.h | 1 - drivers/gpu/drm/radeon/radeon_atombios.c | 15 +- drivers/gpu/drm/radeon/radeon_display.c | 222 +------------------- drivers/gpu/drm/radeon/radeon_drv.c | 4 - drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4 - drivers/gpu/drm/radeon/radeon_mode.h | 9 - 7 files changed, 9 insertions(+), 278 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index ca04a1bdb75b..89600e344230 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -482,19 +482,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* reset the pll flags */ pll->flags = 0; - /* select the PLL algo */ - if (ASIC_IS_AVIVO(rdev)) { - if (radeon_new_pll == 0) - pll->algo = PLL_ALGO_LEGACY; - else - pll->algo = PLL_ALGO_NEW; - } else { - if (radeon_new_pll == 1) - pll->algo = PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; - } - if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || @@ -523,25 +510,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { - pll->algo = PLL_ALGO_LEGACY; + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; - } - /* There is some evidence (often anecdotal) that RV515/RV620 LVDS - * (on some boards at least) prefers the legacy algo. I'm not - * sure whether this should handled generically or on a - * case-by-case quirk basis. Both algos should work fine in the - * majority of cases. - */ - if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && - ((rdev->family == CHIP_RV515) || - (rdev->family == CHIP_RV620))) { - /* allow the user to overrride just in case */ - if (radeon_new_pll == 1) - pll->algo = PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; - } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2bfae5679135..8e5cb2c4fa7e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -88,7 +88,6 @@ extern int radeon_benchmarking; extern int radeon_testing; extern int radeon_connector_table; extern int radeon_tv; -extern int radeon_new_pll; extern int radeon_audio; extern int radeon_disp_priority; extern int radeon_hw_i2c; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 68932ba7b8a4..89ce9b6d89d9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per * family. 
*/ - if (!radeon_new_pll) - p1pll->pll_out_min = 64800; + p1pll->pll_out_min = 64800; } p1pll->pll_in_min = @@ -1390,18 +1389,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); - if (ASIC_IS_AVIVO(rdev)) { - if (radeon_new_pll == 0) - lvds->pll_algo = PLL_ALGO_LEGACY; - else - lvds->pll_algo = PLL_ALGO_NEW; - } else { - if (radeon_new_pll == 1) - lvds->pll_algo = PLL_ALGO_NEW; - else - lvds->pll_algo = PLL_ALGO_LEGACY; - } - encoder->native_mode = lvds->native_mode; if (encoder_enum == 2) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 20464659d3fa..325a07391b3c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -454,13 +454,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) return n; } -static void radeon_compute_pll_legacy(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) +void radeon_compute_pll(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; @@ -609,214 +609,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, *post_div_p = best_post_div; } -static bool -calc_fb_div(struct radeon_pll *pll, - uint32_t freq, - uint32_t post_div, - uint32_t ref_div, - uint32_t *fb_div, - uint32_t *fb_div_frac) -{ - fixed20_12 feedback_divider, a, b; - u32 vco_freq; - - vco_freq = freq * post_div; - /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ - a.full = dfixed_const(pll->reference_freq); - feedback_divider.full = dfixed_const(vco_freq); - feedback_divider.full = dfixed_div(feedback_divider, a); - a.full = dfixed_const(ref_div); - feedback_divider.full = dfixed_mul(feedback_divider, a); - - if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { - /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ - a.full = dfixed_const(10); - feedback_divider.full = dfixed_mul(feedback_divider, a); - feedback_divider.full += dfixed_const_half(0); - feedback_divider.full = dfixed_floor(feedback_divider); - feedback_divider.full = dfixed_div(feedback_divider, a); - - /* *fb_div = floor(feedback_divider); */ - a.full = dfixed_floor(feedback_divider); - *fb_div = dfixed_trunc(a); - /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ - a.full = dfixed_const(10); - b.full = dfixed_mul(feedback_divider, a); - - feedback_divider.full = dfixed_floor(feedback_divider); - feedback_divider.full = dfixed_mul(feedback_divider, a); - feedback_divider.full = b.full - feedback_divider.full; - *fb_div_frac = dfixed_trunc(feedback_divider); - } else { - /* *fb_div = floor(feedback_divider + 0.5); */ - feedback_divider.full += dfixed_const_half(0); - feedback_divider.full = dfixed_floor(feedback_divider); - - *fb_div = dfixed_trunc(feedback_divider); - *fb_div_frac = 0; - } - - if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div)) - return false; - else - return true; -} - -static bool -calc_fb_ref_div(struct radeon_pll *pll, - uint32_t freq, - uint32_t post_div, - uint32_t *fb_div, - uint32_t *fb_div_frac, - uint32_t *ref_div) -{ - fixed20_12 ffreq, max_error, error, pll_out, a; - u32 vco; - u32 pll_out_min, pll_out_max; - - if (pll->flags & 
RADEON_PLL_IS_LCD) { - pll_out_min = pll->lcd_pll_out_min; - pll_out_max = pll->lcd_pll_out_max; - } else { - pll_out_min = pll->pll_out_min; - pll_out_max = pll->pll_out_max; - } - - ffreq.full = dfixed_const(freq); - /* max_error = ffreq * 0.0025; */ - a.full = dfixed_const(400); - max_error.full = dfixed_div(ffreq, a); - - for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { - if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { - vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); - vco = vco / ((*ref_div) * 10); - - if ((vco < pll_out_min) || (vco > pll_out_max)) - continue; - - /* pll_out = vco / post_div; */ - a.full = dfixed_const(post_div); - pll_out.full = dfixed_const(vco); - pll_out.full = dfixed_div(pll_out, a); - - if (pll_out.full >= ffreq.full) { - error.full = pll_out.full - ffreq.full; - if (error.full <= max_error.full) - return true; - } - } - } - return false; -} - -static void radeon_compute_pll_new(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) -{ - u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; - u32 best_freq = 0, vco_frequency; - u32 pll_out_min, pll_out_max; - - if (pll->flags & RADEON_PLL_IS_LCD) { - pll_out_min = pll->lcd_pll_out_min; - pll_out_max = pll->lcd_pll_out_max; - } else { - pll_out_min = pll->pll_out_min; - pll_out_max = pll->pll_out_max; - } - - /* freq = freq / 10; */ - do_div(freq, 10); - - if (pll->flags & RADEON_PLL_USE_POST_DIV) { - post_div = pll->post_div; - if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div)) - goto done; - - vco_frequency = freq * post_div; - if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) - goto done; - - if (pll->flags & RADEON_PLL_USE_REF_DIV) { - ref_div = pll->reference_div; - if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) - goto done; - if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) - goto done; - } - } else { - for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) { - if (pll->flags & RADEON_PLL_LEGACY) { - if ((post_div == 5) || - (post_div == 7) || - (post_div == 9) || - (post_div == 10) || - (post_div == 11)) - continue; - } - - if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) - continue; - - vco_frequency = freq * post_div; - if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) - continue; - if (pll->flags & RADEON_PLL_USE_REF_DIV) { - ref_div = pll->reference_div; - if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) - goto done; - if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) - break; - } else { - if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div)) - break; - } - } - } - - best_freq = pll->reference_freq * 10 * fb_div; - best_freq += pll->reference_freq * fb_div_frac; - best_freq = best_freq / (ref_div * post_div); - -done: - if (best_freq == 0) - DRM_ERROR("Couldn't find valid PLL dividers\n"); - - *dot_clock_p = best_freq / 10; - *fb_div_p = fb_div; - *frac_fb_div_p = fb_div_frac; - *ref_div_p = ref_div; - *post_div_p = post_div; - - DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); -} - -void radeon_compute_pll(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) -{ - 
switch (pll->algo) { - case PLL_ALGO_NEW: - radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p, - frac_fb_div_p, ref_div_p, post_div_p); - break; - case PLL_ALGO_LEGACY: - default: - radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p, - frac_fb_div_p, ref_div_p, post_div_p); - break; - } -} - static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 663cdc10a5c2..f29a2695d961 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -93,7 +93,6 @@ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; -int radeon_new_pll = -1; int radeon_audio = 1; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; @@ -131,9 +130,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); module_param_named(tv, radeon_tv, int, 0444); -MODULE_PARM_DESC(new_pll, "Select new PLL code"); -module_param_named(new_pll, radeon_new_pll, int, 0444); - MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); module_param_named(audio, radeon_audio, int, 0444); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d60b31982845..28e0b1e9e5f1 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -717,10 +717,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) pll = &rdev->clock.p1pll; pll->flags = RADEON_PLL_LEGACY; - if (radeon_new_pll == 1) - pll->algo = PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 8707cd61e58b..29f551769aaf 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -144,12 +144,6 @@ struct radeon_tmds_pll { #define RADEON_PLL_USE_POST_DIV (1 << 6) #define RADEON_PLL_IS_LCD (1 << 7) -/* pll algo */ -enum radeon_pll_algo { - PLL_ALGO_LEGACY, - PLL_ALGO_NEW -}; - struct radeon_pll { /* reference frequency */ uint32_t reference_freq; @@ -182,8 +176,6 @@ struct radeon_pll { /* pll id */ uint32_t id; - /* pll algo */ - enum radeon_pll_algo algo; }; struct radeon_i2c_chan { @@ -346,7 +338,6 @@ struct radeon_encoder_atom_dig { /* atom lvds */ uint32_t lvds_misc; uint16_t panel_pwr_delay; - enum radeon_pll_algo pll_algo; struct radeon_atom_ss *ss; /* panel mode */ struct drm_display_mode native_mode; From ba032a58d1f320039e7850fb6e8651695c1aa571 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 4 Oct 2010 17:13:01 -0400 Subject: [PATCH 384/476] drm/radeon/kms: rework spread spectrum handling This patch reworks spread spectrum handling to enable it properly on lvds and DP/eDP links. It also fixes several bugs in the old spread spectrum code. 
- Use the ss recommended reference divider if available when calculating the pll - Use the proper ss command tables on pre-DCE3 asics - Avoid reading past the end of the ss info tables - Enable ss on evergreen asics (lvds, dp, tmds) - Enable ss on DP/eDP links Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 259 +++++++++++++++++------ drivers/gpu/drm/radeon/radeon_atombios.c | 114 ++++++++-- drivers/gpu/drm/radeon/radeon_encoders.c | 16 +- drivers/gpu/drm/radeon/radeon_mode.h | 20 +- 4 files changed, 307 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 89600e344230..7238f3f54aa6 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc) union atom_enable_ss { - ENABLE_LVDS_SS_PARAMETERS legacy; + ENABLE_LVDS_SS_PARAMETERS lvds_ss; + ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; + ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; }; -static void atombios_enable_ss(struct drm_crtc *crtc) +static void atombios_crtc_program_ss(struct drm_crtc *crtc, + int enable, + int pll_id, + struct radeon_atom_ss *ss) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *encoder = NULL; - struct radeon_encoder *radeon_encoder = NULL; - struct radeon_encoder_atom_dig *dig = NULL; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; - uint16_t percentage = 0; - uint8_t type = 0, step = 0, delay = 0, range = 0; - - /* XXX add ss support for DCE4 */ - if (ASIC_IS_DCE4(rdev)) - return; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - radeon_encoder = to_radeon_encoder(encoder); - /* only enable spread spectrum on LVDS */ - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - dig = radeon_encoder->enc_priv; - if (dig && dig->ss) { - percentage = dig->ss->percentage; - type = dig->ss->type; - step = dig->ss->step; - delay = dig->ss->delay; - range = dig->ss->range; - } else - return; - } else - return; - break; - } - } - - if (!radeon_encoder) - return; memset(&args, 0, sizeof(args)); - if (ASIC_IS_AVIVO(rdev)) { - args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - args.v1.ucSpreadSpectrumType = type; - args.v1.ucSpreadSpectrumStep = step; - args.v1.ucSpreadSpectrumDelay = delay; - args.v1.ucSpreadSpectrumRange = range; - args.v1.ucPpll = radeon_crtc->crtc_id ? 
ATOM_PPLL2 : ATOM_PPLL1; - args.v1.ucEnable = ATOM_ENABLE; + + if (ASIC_IS_DCE4(rdev)) { + args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.v2.ucSpreadSpectrumType = ss->type; + switch (pll_id) { + case ATOM_PPLL1: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; + args.v2.usSpreadSpectrumAmount = ss->amount; + args.v2.usSpreadSpectrumStep = ss->step; + break; + case ATOM_PPLL2: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; + args.v2.usSpreadSpectrumAmount = ss->amount; + args.v2.usSpreadSpectrumStep = ss->step; + break; + case ATOM_DCPLL: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; + args.v2.usSpreadSpectrumAmount = 0; + args.v2.usSpreadSpectrumStep = 0; + break; + case ATOM_PPLL_INVALID: + return; + } + args.v2.ucEnable = enable; + } else if (ASIC_IS_DCE3(rdev)) { + args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.v1.ucSpreadSpectrumType = ss->type; + args.v1.ucSpreadSpectrumStep = ss->step; + args.v1.ucSpreadSpectrumDelay = ss->delay; + args.v1.ucSpreadSpectrumRange = ss->range; + args.v1.ucPpll = pll_id; + args.v1.ucEnable = enable; + } else if (ASIC_IS_AVIVO(rdev)) { + if (enable == ATOM_DISABLE) { + atombios_disable_ss(crtc); + return; + } + args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.lvds_ss_2.ucSpreadSpectrumType = ss->type; + args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; + args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; + args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; + args.lvds_ss_2.ucEnable = enable; } else { - args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - args.legacy.ucSpreadSpectrumType = type; - args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; - args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; - args.legacy.ucEnable = ATOM_ENABLE; + if (enable == ATOM_DISABLE) { + atombios_disable_ss(crtc); + return; + } + args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.lvds_ss.ucSpreadSpectrumType = ss->type; + args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; + args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; + args.lvds_ss.ucEnable = enable; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -468,7 +479,9 @@ union adjust_pixel_clock { static u32 atombios_adjust_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct radeon_pll *pll) + struct radeon_pll *pll, + bool ss_enabled, + struct radeon_atom_ss *ss) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -506,6 +519,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } + /* use recommended ref_div for ss */ + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (ss_enabled) { + if (ss->refdiv) { + pll->flags |= RADEON_PLL_USE_REF_DIV; + pll->reference_div = ss->refdiv; + } + } + } + if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) @@ -547,9 +570,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP eventually */ - /* args.v1.ucConfig |= - ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v1.ucConfig |= + ADJUST_DISPLAY_CONFIG_SS_ENABLE; } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { 
args.v1.ucConfig |= ADJUST_DISPLAY_CONFIG_SS_ENABLE; @@ -566,11 +589,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucDispPllConfig = 0; if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP/eDP eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ @@ -590,17 +612,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP/eDP eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { - /* want to enable SS on LVDS eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; } else { if (mode->clock > 165000) args.v3.sInput.ucDispPllConfig |= @@ -774,6 +796,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode struct radeon_pll *pll; u32 adjusted_clock; int encoder_mode = 0; + struct radeon_atom_ss ss; + bool ss_enabled = false; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { @@ -800,16 +824,112 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; } + if (radeon_encoder->active_device & + (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = + radeon_get_connector_for_encoder(encoder); + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + int dp_clock; + + switch (encoder_mode) { + case ATOM_ENCODER_MODE_DP: + /* DP/eDP */ + dp_clock = dig_connector->dp_clock / 10; + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + dig->lcd_ss_id, + dp_clock); + else + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + dig->lcd_ss_id); + } else { + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_DP, + dp_clock); + else { + if (dp_clock == 16200) { + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID2); + if (!ss_enabled) + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID1); + } else + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID1); + } + } + break; + case ATOM_ENCODER_MODE_LVDS: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + dig->lcd_ss_id, + mode->clock / 10); + else + ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, + dig->lcd_ss_id); + break; + case ATOM_ENCODER_MODE_DVI: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + 
ASIC_INTERNAL_SS_ON_TMDS, + mode->clock / 10); + break; + case ATOM_ENCODER_MODE_HDMI: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_HDMI, + mode->clock / 10); + break; + default: + break; + } + } + /* adjust pixel clock as needed */ - adjusted_clock = atombios_adjust_pll(crtc, mode, pll); + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); + atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, ref_div, fb_div, frac_fb_div, post_div); + if (ss_enabled) { + /* calculate ss amount and step size */ + if (ASIC_IS_DCE4(rdev)) { + u32 step_size; + u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; + ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; + ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & + ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; + if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) + step_size = (4 * amount * ref_div * (ss.rate * 2048)) / + (125 * 25 * pll->reference_freq / 100); + else + step_size = (2 * amount * ref_div * (ss.rate * 2048)) / + (125 * 25 * pll->reference_freq / 100); + ss.step = step_size; + } + + atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + } } static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, @@ -1188,12 +1308,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, } } - atombios_disable_ss(crtc); /* always set DCPLL */ - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_DCE4(rdev)) { + struct radeon_atom_ss ss; + bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_DCPLL, + rdev->clock.default_dispclk); + if (ss_enabled) + atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); atombios_crtc_set_dcpll(crtc); + if (ss_enabled) + atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); + } atombios_crtc_set_pll(crtc, adjusted_mode); - atombios_enable_ss(crtc); if (ASIC_IS_DCE4(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 89ce9b6d89d9..2b44cbcb031b 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1276,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, return false; } -static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct - radeon_encoder - *encoder, - int id) +bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id) { - struct drm_device *dev = encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); - uint16_t data_offset; + uint16_t data_offset, size; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; uint8_t frev, crev; - struct radeon_atom_ss *ss = NULL; - int i; + int i, num_indices; - if (id > ATOM_MAX_SS_ENTRY) - return NULL; - - if (atom_parse_data_header(mode_info->atom_context, index, NULL, + memset(ss, 0, sizeof(struct radeon_atom_ss)); + if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { ss_info = (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + 
data_offset); - ss = - kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); - if (!ss) - return NULL; - - for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { + for (i = 0; i < num_indices; i++) { if (ss_info->asSS_Info[i].ucSS_Id == id) { ss->percentage = le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); @@ -1314,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct ss->delay = ss_info->asSS_Info[i].ucSS_Delay; ss->range = ss_info->asSS_Info[i].ucSS_Range; ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; - break; + return true; } } } - return ss; + return false; +} + +union asic_ss_info { + struct _ATOM_ASIC_INTERNAL_SS_INFO info; + struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; + struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; +}; + +bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id, u32 clock) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); + uint16_t data_offset, size; + union asic_ss_info *ss_info; + uint8_t frev, crev; + int i, num_indices; + + memset(ss, 0, sizeof(struct radeon_atom_ss)); + if (atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + + ss_info = + (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); + + switch (frev) { + case 1: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT); + + for (i = 0; i < num_indices; i++) { + if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); + return true; + } + } + break; + case 2: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); + for (i = 0; i < num_indices; i++) { + if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); + return true; + } + } + break; + case 3: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); + for (i = 0; i < num_indices; i++) { + if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + return true; + } + } + break; + default: + DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); + break; + } + + } + return false; } union lvds_info { @@ -1370,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); - 
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; + lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) @@ -1387,7 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); - lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); + lvds->lcd_ss_id = lvds_info->info.ucSS_Id; encoder->native_mode = lvds->native_mode; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 2c293e8304d6..ae58b6849a2e 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v1.ucMisc |= (1 << 1); } else { if (dig->linkb) @@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v2.ucTemporal = 0; args.v2.ucFRC = 0; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { + if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; } - if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { + if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; - if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) + if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; } } else { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 29f551769aaf..d25cf093c84d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -324,21 +324,24 @@ struct radeon_encoder_ext_tmds { struct radeon_atom_ss { uint16_t percentage; uint8_t type; - uint8_t step; + uint16_t step; uint8_t delay; uint8_t range; uint8_t refdiv; + /* asic_ss */ + uint16_t rate; + uint16_t amount; }; struct radeon_encoder_atom_dig { bool linkb; /* atom dig */ bool coherent_mode; - int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ - /* atom lvds */ - uint32_t lvds_misc; + int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. 
*/ + /* atom lvds/edp */ + uint32_t lcd_misc; uint16_t panel_pwr_delay; - struct radeon_atom_ss *ss; + uint32_t lcd_ss_id; /* panel mode */ struct drm_display_mode native_mode; }; @@ -480,6 +483,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); +extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id); +extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id, u32 clock); + extern void radeon_compute_pll(struct radeon_pll *pll, uint64_t freq, uint32_t *dot_clock_p, From 6383cf7d7839bf52aa4efa20cc921773126797f4 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 5 Oct 2010 19:57:36 -0400 Subject: [PATCH 385/476] drm/radeon: Add function for display scanout position query. radeon_get_crtc_scanoutpos() returns the current horizontal and vertical scanout position of a crtc. It also reports if the display scanout is currently inside the vblank area. hpos reports current horizontal pixel scanout position. vpos reports the current scanned out line as a value >= 0 in active scanout. If the scanout is inside vblank area, it reports a negative value, the number of scanlines until end of vblank aka start of active scanout, e.g., -3 == "At most 3 scanlines until end of vblank". This code is derived from radeon_pm_in_vbl(), tested on R500 and R600. Signed-off-by: Mario Kleiner Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 153 ++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_mode.h | 7 ++ 2 files changed, 160 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 325a07391b3c..fd70b8428f39 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -989,3 +989,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, } return true; } + +/* + * Retrieve current video scanout position of crtc on a given gpu. + * + * \param rdev Device to query. + * \param crtc Crtc to query. + * \param *vpos Location where vertical scanout position should be stored. + * \param *hpos Location where horizontal scanout position should go. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * \return Flags, or'ed together as follows: + * + * RADEON_SCANOUTPOS_VALID = Query successfull. + * RADEON_SCANOUTPOS_INVBL = Inside vblank. + * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant but + * unknown small number of scanlines wrt. real scanout position. 
+ * + */ +int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) +{ + u32 stat_crtc = 0, vbl = 0, position = 0; + int vbl_start, vbl_end, vtotal, ret = 0; + bool in_vbl = true; + + if (ASIC_IS_DCE4(rdev)) { + if (crtc == 0) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC0_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC0_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC1_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC1_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 2) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC2_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC2_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 3) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC3_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC3_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 4) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC4_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC4_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 5) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC5_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC5_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + } else if (ASIC_IS_AVIVO(rdev)) { + if (crtc == 0) { + vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); + position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); + position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); + ret |= RADEON_SCANOUTPOS_VALID; + } + } else { + /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ + if (crtc == 0) { + /* Assume vbl_end == 0, get vbl_start from + * upper 16 bits. + */ + vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & + RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; + /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ + position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; + stat_crtc = RREG32(RADEON_CRTC_STATUS); + if (!(stat_crtc & 1)) + in_vbl = false; + + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & + RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; + position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; + stat_crtc = RREG32(RADEON_CRTC2_STATUS); + if (!(stat_crtc & 1)) + in_vbl = false; + + ret |= RADEON_SCANOUTPOS_VALID; + } + } + + /* Decode into vertical and horizontal scanout position. */ + *vpos = position & 0x1fff; + *hpos = (position >> 16) & 0x1fff; + + /* Valid vblank area boundaries from gpu retrieved? */ + if (vbl > 0) { + /* Yes: Decode. */ + ret |= RADEON_SCANOUTPOS_ACCURATE; + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + } + else { + /* No: Fake something reasonable which gives at least ok results. */ + vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; + vbl_end = 0; + } + + /* Test scanout position against vblank region. 
*/ + if ((*vpos < vbl_start) && (*vpos >= vbl_end)) + in_vbl = false; + + /* Check if inside vblank area and apply corrective offsets: + * vpos will then be >=0 in video scanout area, but negative + * within vblank area, counting down the number of lines until + * start of scanout. + */ + + /* Inside "upper part" of vblank area? Apply corrective offset if so: */ + if (in_vbl && (*vpos >= vbl_start)) { + vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; + *vpos = *vpos - vtotal; + } + + /* Correct for shifted end of vbl at vbl_end. */ + *vpos = *vpos - vbl_end; + + /* In vblank? */ + if (in_vbl) + ret |= RADEON_SCANOUTPOS_INVBL; + + return ret; +} diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d25cf093c84d..f4396d71b417 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -428,6 +428,11 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; +/* radeon_get_crtc_scanoutpos() return flags */ +#define RADEON_SCANOUTPOS_VALID (1 << 0) +#define RADEON_SCANOUTPOS_INVBL (1 << 1) +#define RADEON_SCANOUTPOS_ACCURATE (1 << 2) + extern enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev); extern enum radeon_tv_std @@ -531,6 +536,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); +extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); + extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern struct edid * radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); From 75fa0b08e50cb72715b58321e8259c47adfe4c6f Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 5 Oct 2010 19:57:37 -0400 Subject: [PATCH 386/476] drm/radeon: Modify radeon_pm_in_vbl to use radeon_get_crtc_scanoutpos() radeon_pm_in_vbl() didn't report in vblank status accurately. Make it a wrapper around radeon_get_crtc_scanoutpos() which corrects for biases, so it reports accurately. radeon_pm_in_vbl() will only report in_vbl if all active crtc's are currently inside vblank. 
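For orientation, the intended use of the new query is simply: walk every active crtc and require each one to report "inside vblank". A minimal sketch of that pattern follows (the helper name is illustrative only and relies on the declarations added by the previous patch; the authoritative change is the radeon_pm_in_vbl() rewrite in the diff below):

static bool example_all_active_crtcs_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, status;

	for (crtc = 0; crtc < rdev->num_crtc; crtc++) {
		if (!(rdev->pm.active_crtcs & (1 << crtc)))
			continue;
		status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
		/* a valid query that is not in vblank means this crtc is actively scanning out */
		if ((status & RADEON_SCANOUTPOS_VALID) &&
		    !(status & RADEON_SCANOUTPOS_INVBL))
			return false;
	}
	return true;
}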
agd5f: use rdev->num_crtc rather than hardcoding the crtc count Signed-off-by: Mario Kleiner Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_pm.c | 70 ++++-------------------------- 1 file changed, 9 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f87efec76236..8c9b2ef32c68 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) static bool radeon_pm_in_vbl(struct radeon_device *rdev) { - u32 stat_crtc = 0, vbl = 0, position = 0; + int crtc, vpos, hpos, vbl_status; bool in_vbl = true; - if (ASIC_IS_DCE4(rdev)) { - if (rdev->pm.active_crtcs & (1 << 0)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 2)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 3)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 4)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 5)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; - } - } else if (ASIC_IS_AVIVO(rdev)) { - if (rdev->pm.active_crtcs & (1 << 0)) { - vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; - position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; - position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; - } - if (position < vbl && position > 1) - in_vbl = false; - } else { - if (rdev->pm.active_crtcs & (1 << 0)) { - stat_crtc = RREG32(RADEON_CRTC_STATUS); - if (!(stat_crtc & 1)) - in_vbl = false; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - stat_crtc = RREG32(RADEON_CRTC2_STATUS); - if (!(stat_crtc & 1)) + /* Iterate over all active crtc's. All crtc's must be in vblank, + * otherwise return in_vbl == false. 
+ */ + for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { + if (rdev->pm.active_crtcs & (1 << crtc)) { + vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); + if ((vbl_status & RADEON_SCANOUTPOS_VALID) && + !(vbl_status & RADEON_SCANOUTPOS_INVBL)) in_vbl = false; } } - if (position < vbl && position > 1) - in_vbl = false; - return in_vbl; } From d7ccd8fc11700502b5a104b7bad595b492a3aa1b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Sep 2010 11:33:36 -0400 Subject: [PATCH 387/476] drm/radeon/kms: add drm blit support for evergreen This patch implements blit support for bo moves using the 3D engine. It uses the same method as r6xx/r7xx: - store the base state in an IB - emit variable state and vertex buffers to do the blit This allows the hw to move bos using the 3D engine and allows full use of vram beyond the pci aperture size. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/evergreen.c | 51 +- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 776 ++++++++++++++++++ .../gpu/drm/radeon/evergreen_blit_shaders.c | 359 ++++++++ .../gpu/drm/radeon/evergreen_blit_shaders.h | 35 + drivers/gpu/drm/radeon/evergreend.h | 5 + drivers/gpu/drm/radeon/r600_blit_kms.c | 26 +- drivers/gpu/drm/radeon/radeon.h | 8 + drivers/gpu/drm/radeon/radeon_asic.c | 6 +- drivers/gpu/drm/radeon/radeon_asic.h | 8 +- 10 files changed, 1221 insertions(+), 55 deletions(-) create mode 100644 drivers/gpu/drm/radeon/evergreen_blit_kms.c create mode 100644 drivers/gpu/drm/radeon/evergreen_blit_shaders.c create mode 100644 drivers/gpu/drm/radeon/evergreen_blit_shaders.h diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index aebe00875041..6cae4f2028d2 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o evergreen_cs.o + evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4c82cc830271..aee61ae24402 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2062,26 +2062,13 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } evergreen_gpu_init(rdev); -#if 0 - if (!rdev->r600_blit.shader_obj) { - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failed blitter (%d).\n", r); - return r; - } - } - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); + r = evergreen_blit_init(rdev); if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); - return r; + evergreen_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } -#endif /* allocate wb buffer */ r = radeon_wb_init(rdev); @@ -2139,23 +2126,43 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { -#if 0 int r; -#endif + /* FIXME: we should wait for ring 
to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); -#if 0 + /* unpin shaders bo */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (likely(r == 0)) { radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); } -#endif + + return 0; +} + +int evergreen_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_pages, struct radeon_fence *fence) +{ + int r; + + mutex_lock(&rdev->r600_blit.mutex); + rdev->r600_blit.vb_ib = NULL; + r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + if (r) { + if (rdev->r600_blit.vb_ib) + radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); + mutex_unlock(&rdev->r600_blit.mutex); + return r; + } + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); + evergreen_blit_done_copy(rdev, fence); + mutex_unlock(&rdev->r600_blit.mutex); return 0; } @@ -2286,7 +2293,7 @@ int evergreen_init(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev) { - /*r600_blit_fini(rdev);*/ + evergreen_blit_fini(rdev); r700_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c new file mode 100644 index 000000000000..ce1ae4a2aa54 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -0,0 +1,776 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Alex Deucher + */ + +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon.h" + +#include "evergreend.h" +#include "evergreen_blit_shaders.h" + +#define DI_PT_RECTLIST 0x11 +#define DI_INDEX_SIZE_16_BIT 0x0 +#define DI_SRC_SEL_AUTO_INDEX 0x2 + +#define FMT_8 0x1 +#define FMT_5_6_5 0x8 +#define FMT_8_8_8_8 0x1a +#define COLOR_8 0x1 +#define COLOR_5_6_5 0x8 +#define COLOR_8_8_8_8 0x1a + +/* emits 17 */ +static void +set_render_target(struct radeon_device *rdev, int format, + int w, int h, u64 gpu_addr) +{ + u32 cb_color_info; + int pitch, slice; + + h = ALIGN(h, 8); + if (h < 8) + h = 8; + + cb_color_info = ((format << 2) | (1 << 24)); + pitch = (w / 8) - 1; + slice = ((w * h) / 64) - 1; + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); + radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, pitch); + radeon_ring_write(rdev, slice); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, cb_color_info); + radeon_ring_write(rdev, (1 << 4)); + radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); +} + +/* emits 5dw */ +static void +cp_set_surface_sync(struct radeon_device *rdev, + u32 sync_type, u32 size, + u64 mc_addr) +{ + u32 cp_coher_size; + + if (size == 0xffffffff) + cp_coher_size = 0xffffffff; + else + cp_coher_size = ((size + 255) >> 8); + + radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); + radeon_ring_write(rdev, sync_type); + radeon_ring_write(rdev, cp_coher_size); + radeon_ring_write(rdev, mc_addr >> 8); + radeon_ring_write(rdev, 10); /* poll interval */ +} + +/* emits 11dw + 1 surface sync = 16dw */ +static void +set_shaders(struct radeon_device *rdev) +{ + u64 gpu_addr; + + /* VS */ + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); + radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, 2); + radeon_ring_write(rdev, 0); + + /* PS */ + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); + radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, 1); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 2); + + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; + cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); +} + +/* emits 10 + 1 sync (5) = 15 */ +static void +set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) +{ + u32 sq_vtx_constant_word2, sq_vtx_constant_word3; + + /* high addr, stride */ + sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); + /* xyzw swizzles */ + sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); + radeon_ring_write(rdev, 0x580); + radeon_ring_write(rdev, gpu_addr & 0xffffffff); + radeon_ring_write(rdev, 48 - 1); /* size */ + radeon_ring_write(rdev, sq_vtx_constant_word2); + radeon_ring_write(rdev, sq_vtx_constant_word3); + radeon_ring_write(rdev, 0); + 
radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); + + if (rdev->family == CHIP_CEDAR) + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, 48, gpu_addr); + else + cp_set_surface_sync(rdev, + PACKET3_VC_ACTION_ENA, 48, gpu_addr); + +} + +/* emits 10 */ +static void +set_tex_resource(struct radeon_device *rdev, + int format, int w, int h, int pitch, + u64 gpu_addr) +{ + u32 sq_tex_resource_word0, sq_tex_resource_word1; + u32 sq_tex_resource_word4, sq_tex_resource_word7; + + if (h < 1) + h = 1; + + sq_tex_resource_word0 = (1 << 0); /* 2D */ + sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | + ((w - 1) << 18)); + sq_tex_resource_word1 = ((h - 1) << 0); + /* xyzw swizzles */ + sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); + + sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_tex_resource_word0); + radeon_ring_write(rdev, sq_tex_resource_word1); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, sq_tex_resource_word4); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_tex_resource_word7); +} + +/* emits 12 */ +static void +set_scissors(struct radeon_device *rdev, int x1, int y1, + int x2, int y2) +{ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); +} + +/* emits 10 */ +static void +draw_auto(struct radeon_device *rdev) +{ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, DI_PT_RECTLIST); + + radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); + radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); + + radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); + radeon_ring_write(rdev, 1); + + radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); + radeon_ring_write(rdev, 3); + radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX); + +} + +/* emits 20 */ +static void +set_default_state(struct radeon_device *rdev) +{ + u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; + u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; + u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; + int num_ps_gprs, num_vs_gprs, num_temp_gprs; + int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; + int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; + int num_hs_threads, num_ls_threads; + int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; + int num_hs_stack_entries, num_ls_stack_entries; + u64 gpu_addr; + 
int dwords; + + switch (rdev->family) { + case CHIP_CEDAR: + default: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 96; + num_vs_threads = 16; + num_gs_threads = 16; + num_es_threads = 16; + num_hs_threads = 16; + num_ls_threads = 16; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; + case CHIP_REDWOOD: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; + case CHIP_JUNIPER: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 85; + num_vs_stack_entries = 85; + num_gs_stack_entries = 85; + num_es_stack_entries = 85; + num_hs_stack_entries = 85; + num_ls_stack_entries = 85; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 85; + num_vs_stack_entries = 85; + num_gs_stack_entries = 85; + num_es_stack_entries = 85; + num_hs_stack_entries = 85; + num_ls_stack_entries = 85; + break; + } + + if (rdev->family == CHIP_CEDAR) + sq_config = 0; + else + sq_config = VC_ENABLE; + + sq_config |= (EXPORT_SRC_C | + CS_PRIO(0) | + LS_PRIO(0) | + HS_PRIO(0) | + PS_PRIO(0) | + VS_PRIO(1) | + GS_PRIO(2) | + ES_PRIO(3)); + + sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | + NUM_VS_GPRS(num_vs_gprs) | + NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); + sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | + NUM_ES_GPRS(num_es_gprs)); + sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) | + NUM_LS_GPRS(num_ls_gprs)); + sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | + NUM_VS_THREADS(num_vs_threads) | + NUM_GS_THREADS(num_gs_threads) | + NUM_ES_THREADS(num_es_threads)); + sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) | + NUM_LS_THREADS(num_ls_threads)); + sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | + NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); + sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | + NUM_ES_STACK_ENTRIES(num_es_stack_entries)); + sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | + NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); + + /* emit an IB pointing at default state */ + dwords = ALIGN(rdev->r600_blit.state_len, 0x10); + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; + radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); + radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); + radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); + radeon_ring_write(rdev, dwords); + + /* 
disable dyn gprs */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, 0); + + /* SQ config */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); + radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, sq_config); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_thread_resource_mgmt); + radeon_ring_write(rdev, sq_thread_resource_mgmt_2); + radeon_ring_write(rdev, sq_stack_resource_mgmt_1); + radeon_ring_write(rdev, sq_stack_resource_mgmt_2); + radeon_ring_write(rdev, sq_stack_resource_mgmt_3); +} + +static inline uint32_t i2f(uint32_t input) +{ + u32 result, i, exponent, fraction; + + if ((input & 0x3fff) == 0) + result = 0; /* 0 is a special case */ + else { + exponent = 140; /* exponent biased by 127; */ + fraction = (input & 0x3fff) << 10; /* cheat and only + handle numbers below 2^^15 */ + for (i = 0; i < 14; i++) { + if (fraction & 0x800000) + break; + else { + fraction = fraction << 1; /* keep + shifting left until top bit = 1 */ + exponent = exponent - 1; + } + } + result = exponent << 23 | (fraction & 0x7fffff); /* mask + off top bit; assumed 1 */ + } + return result; +} + +int evergreen_blit_init(struct radeon_device *rdev) +{ + u32 obj_size; + int r, dwords; + void *ptr; + u32 packet2s[16]; + int num_packet2s = 0; + + /* pin copy shader into vram if already initialized */ + if (rdev->r600_blit.shader_obj) + goto done; + + mutex_init(&rdev->r600_blit.mutex); + rdev->r600_blit.state_offset = 0; + + rdev->r600_blit.state_len = evergreen_default_size; + + dwords = rdev->r600_blit.state_len; + while (dwords & 0xf) { + packet2s[num_packet2s++] = PACKET2(0); + dwords++; + } + + obj_size = dwords * 4; + obj_size = ALIGN(obj_size, 256); + + rdev->r600_blit.vs_offset = obj_size; + obj_size += evergreen_vs_size * 4; + obj_size = ALIGN(obj_size, 256); + + rdev->r600_blit.ps_offset = obj_size; + obj_size += evergreen_ps_size * 4; + obj_size = ALIGN(obj_size, 256); + + r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("evergreen failed to allocate shader\n"); + return r; + } + + DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", + obj_size, + rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); + + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); + if (r) { + DRM_ERROR("failed to map blit object %d\n", r); + return r; + } + + memcpy_toio(ptr + rdev->r600_blit.state_offset, + evergreen_default_state, rdev->r600_blit.state_len * 4); + + if (num_packet2s) + memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), + packet2s, num_packet2s * 4); + memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); + memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); + radeon_bo_kunmap(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + +done: + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + 
&rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } + return 0; +} + +void evergreen_blit_fini(struct radeon_device *rdev) +{ + int r; + + if (rdev->r600_blit.shader_obj == NULL) + return; + /* If we can't reserve the bo, unref should be enough to destroy + * it when it becomes idle. + */ + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (!r) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } + radeon_bo_unref(&rdev->r600_blit.shader_obj); +} + +static int evergreen_vb_ib_get(struct radeon_device *rdev) +{ + int r; + r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); + if (r) { + DRM_ERROR("failed to get IB for vertex buffer\n"); + return r; + } + + rdev->r600_blit.vb_total = 64*1024; + rdev->r600_blit.vb_used = 0; + return 0; +} + +static void evergreen_vb_ib_put(struct radeon_device *rdev) +{ + radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); + radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); +} + +int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) +{ + int r; + int ring_size, line_size; + int max_size; + /* loops of emits + fence emit possible */ + int dwords_per_loop = 74, num_loops; + + r = evergreen_vb_ib_get(rdev); + if (r) + return r; + + /* 8 bpp vs 32 bpp for xfer unit */ + if (size_bytes & 3) + line_size = 8192; + else + line_size = 8192 * 4; + + max_size = 8192 * line_size; + + /* major loops cover the max size transfer */ + num_loops = ((size_bytes + max_size) / max_size); + /* minor loops cover the extra non aligned bits */ + num_loops += ((size_bytes % line_size) ? 1 : 0); + /* calculate number of loops correctly */ + ring_size = num_loops * dwords_per_loop; + /* set default + shaders */ + ring_size += 36; /* shaders + def state */ + ring_size += 10; /* fence emit for VB IB */ + ring_size += 5; /* done copy */ + ring_size += 10; /* fence emit for done copy */ + r = radeon_ring_lock(rdev, ring_size); + if (r) + return r; + + set_default_state(rdev); /* 20 */ + set_shaders(rdev); /* 16 */ + return 0; +} + +void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) +{ + int r; + + if (rdev->r600_blit.vb_ib) + evergreen_vb_ib_put(rdev); + + if (fence) + r = radeon_fence_emit(rdev, fence); + + radeon_ring_unlock_commit(rdev); +} + +void evergreen_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes) +{ + int max_bytes; + u64 vb_gpu_addr; + u32 *vb; + + DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, + size_bytes, rdev->r600_blit.vb_used); + vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); + if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { + max_bytes = 8192; + + while (size_bytes) { + int cur_size = size_bytes; + int src_x = src_gpu_addr & 255; + int dst_x = dst_gpu_addr & 255; + int h = 1; + src_gpu_addr = src_gpu_addr & ~255; + dst_gpu_addr = dst_gpu_addr & ~255; + + if (!src_x && !dst_x) { + h = (cur_size / max_bytes); + if (h > 8192) + h = 8192; + if (h == 0) + h = 1; + else + cur_size = max_bytes; + } else { + if (cur_size > max_bytes) + cur_size = max_bytes; + if (cur_size > (max_bytes - dst_x)) + cur_size = (max_bytes - dst_x); + if (cur_size > (max_bytes - src_x)) + cur_size = (max_bytes - src_x); + } + + if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { + WARN_ON(1); + } + + vb[0] = i2f(dst_x); + vb[1] = 0; + 
vb[2] = i2f(src_x); + vb[3] = 0; + + vb[4] = i2f(dst_x); + vb[5] = i2f(h); + vb[6] = i2f(src_x); + vb[7] = i2f(h); + + vb[8] = i2f(dst_x + cur_size); + vb[9] = i2f(h); + vb[10] = i2f(src_x + cur_size); + vb[11] = i2f(h); + + /* src 10 */ + set_tex_resource(rdev, FMT_8, + src_x + cur_size, h, src_x + cur_size, + src_gpu_addr); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); + + + /* dst 17 */ + set_render_target(rdev, COLOR_8, + dst_x + cur_size, h, + dst_gpu_addr); + + /* scissors 12 */ + set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); + + /* 15 */ + vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; + set_vtx_resource(rdev, vb_gpu_addr); + + /* draw 10 */ + draw_auto(rdev); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, + cur_size * h, dst_gpu_addr); + + vb += 12; + rdev->r600_blit.vb_used += 12 * 4; + + src_gpu_addr += cur_size * h; + dst_gpu_addr += cur_size * h; + size_bytes -= cur_size * h; + } + } else { + max_bytes = 8192 * 4; + + while (size_bytes) { + int cur_size = size_bytes; + int src_x = (src_gpu_addr & 255); + int dst_x = (dst_gpu_addr & 255); + int h = 1; + src_gpu_addr = src_gpu_addr & ~255; + dst_gpu_addr = dst_gpu_addr & ~255; + + if (!src_x && !dst_x) { + h = (cur_size / max_bytes); + if (h > 8192) + h = 8192; + if (h == 0) + h = 1; + else + cur_size = max_bytes; + } else { + if (cur_size > max_bytes) + cur_size = max_bytes; + if (cur_size > (max_bytes - dst_x)) + cur_size = (max_bytes - dst_x); + if (cur_size > (max_bytes - src_x)) + cur_size = (max_bytes - src_x); + } + + if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { + WARN_ON(1); + } + + vb[0] = i2f(dst_x / 4); + vb[1] = 0; + vb[2] = i2f(src_x / 4); + vb[3] = 0; + + vb[4] = i2f(dst_x / 4); + vb[5] = i2f(h); + vb[6] = i2f(src_x / 4); + vb[7] = i2f(h); + + vb[8] = i2f((dst_x + cur_size) / 4); + vb[9] = i2f(h); + vb[10] = i2f((src_x + cur_size) / 4); + vb[11] = i2f(h); + + /* src 10 */ + set_tex_resource(rdev, FMT_8_8_8_8, + (src_x + cur_size) / 4, + h, (src_x + cur_size) / 4, + src_gpu_addr); + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); + + /* dst 17 */ + set_render_target(rdev, COLOR_8_8_8_8, + (dst_x + cur_size) / 4, h, + dst_gpu_addr); + + /* scissors 12 */ + set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h); + + /* Vertex buffer setup 15 */ + vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; + set_vtx_resource(rdev, vb_gpu_addr); + + /* draw 10 */ + draw_auto(rdev); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, + cur_size * h, dst_gpu_addr); + + /* 74 ring dwords per loop */ + vb += 12; + rdev->r600_blit.vb_used += 12 * 4; + + src_gpu_addr += cur_size * h; + dst_gpu_addr += cur_size * h; + size_bytes -= cur_size * h; + } + } +} + diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c new file mode 100644 index 000000000000..5d5045027b46 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c @@ -0,0 +1,359 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Alex Deucher + */ + +#include +#include + +/* + * evergreen cards need to use the 3D engine to blit data which requires + * quite a bit of hw state setup. Rather than pull the whole 3D driver + * (which normally generates the 3D state) into the DRM, we opt to use + * statically generated state tables. The regsiter state and shaders + * were hand generated to support blitting functionality. See the 3D + * driver or documentation for descriptions of the registers and + * shader instructions. + */ + +const u32 evergreen_default_state[] = +{ + 0xc0012800, /* CONTEXT_CONTROL */ + 0x80000000, + 0x80000000, + + 0xc0016900, + 0x0000023b, + 0x00000000, /* SQ_LDS_ALLOC_PS */ + + 0xc0066900, + 0x00000240, + 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + + 0xc0046900, + 0x00000247, + 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ + 0x00000000, + 0x00000000, + 0x00000000, + + 0xc0026f00, + 0x00000000, + 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ + 0x00000000, + + 0xc0026900, + 0x00000010, + 0x00000000, /* DB_Z_INFO */ + 0x00000000, /* DB_STENCIL_INFO */ + + + 0xc0016900, + 0x00000200, + 0x00000000, /* DB_DEPTH_CONTROL */ + + 0xc0066900, + 0x00000000, + 0x00000060, /* DB_RENDER_CONTROL */ + 0x00000000, /* DB_COUNT_CONTROL */ + 0x00000000, /* DB_DEPTH_VIEW */ + 0x0000002a, /* DB_RENDER_OVERRIDE */ + 0x00000000, /* DB_RENDER_OVERRIDE2 */ + 0x00000000, /* DB_HTILE_DATA_BASE */ + + 0xc0026900, + 0x0000000a, + 0x00000000, /* DB_STENCIL_CLEAR */ + 0x00000000, /* DB_DEPTH_CLEAR */ + + 0xc0016900, + 0x000002dc, + 0x0000aa00, /* DB_ALPHA_TO_MASK */ + + 0xc0016900, + 0x00000080, + 0x00000000, /* PA_SC_WINDOW_OFFSET */ + + 0xc00d6900, + 0x00000083, + 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ + 0x00000000, /* PA_SC_CLIPRECT_0_TL */ + 0x20002000, /* PA_SC_CLIPRECT_0_BR */ + 0x00000000, + 0x20002000, + 0x00000000, + 0x20002000, + 0x00000000, + 0x20002000, + 0xaaaaaaaa, /* PA_SC_EDGERULE */ + 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ + 0x0000000f, /* CB_TARGET_MASK */ + 0x0000000f, /* CB_SHADER_MASK */ + + 0xc0226900, + 0x00000094, + 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ + 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 
0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ + 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ + + 0xc0016900, + 0x000000d4, + 0x00000000, /* SX_MISC */ + + 0xc0026900, + 0x00000292, + 0x00000000, /* PA_SC_MODE_CNTL_0 */ + 0x00000000, /* PA_SC_MODE_CNTL_1 */ + + 0xc0106900, + 0x00000300, + 0x00000000, /* PA_SC_LINE_CNTL */ + 0x00000000, /* PA_SC_AA_CONFIG */ + 0x00000005, /* PA_SU_VTX_CNTL */ + 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ + 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ + 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ + 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ + 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */ + 0xffffffff, /* PA_SC_AA_MASK */ + + 0xc00d6900, + 0x00000202, + 0x00cc0010, /* CB_COLOR_CONTROL */ + 0x00000210, /* DB_SHADER_CONTROL */ + 0x00010000, /* PA_CL_CLIP_CNTL */ + 0x00000004, /* PA_SU_SC_MODE_CNTL */ + 0x00000100, /* PA_CL_VTE_CNTL */ + 0x00000000, /* PA_CL_VS_OUT_CNTL */ + 0x00000000, /* PA_CL_NANINF_CNTL */ + 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ + 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ + 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */ + + 0xc0066900, + 0x000002de, + 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + + 0xc0016900, + 0x00000229, + 0x00000000, /* SQ_PGM_START_FS */ + + 0xc0016900, + 0x0000022a, + 0x00000000, /* SQ_PGM_RESOURCES_FS */ + + 0xc0096900, + 0x00000100, + 0x00ffffff, /* VGT_MAX_VTX_INDX */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* SX_ALPHA_TEST_CONTROL */ + 0x00000000, /* CB_BLEND_RED */ + 0x00000000, /* CB_BLEND_GREEN */ + 0x00000000, /* CB_BLEND_BLUE */ + 0x00000000, /* CB_BLEND_ALPHA */ + + 0xc0026900, + 0x000002a8, + 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ + 0x00000000, /* */ + + 0xc0026900, + 0x000002ad, + 0x00000000, /* VGT_REUSE_OFF */ + 0x00000000, /* */ + + 0xc0116900, + 0x00000280, + 0x00000000, /* PA_SU_POINT_SIZE */ + 0x00000000, /* PA_SU_POINT_MINMAX */ + 0x00000008, /* PA_SU_LINE_CNTL */ + 0x00000000, /* PA_SC_LINE_STIPPLE */ + 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ + 0x00000000, /* VGT_HOS_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* VGT_GS_MODE */ + + 0xc0016900, + 0x000002a1, + 0x00000000, /* VGT_PRIMITIVEID_EN */ + + 0xc0016900, + 0x000002a5, + 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ + + 0xc0016900, + 0x000002d5, + 0x00000000, /* VGT_SHADER_STAGES_EN */ + + 0xc0026900, + 0x000002e5, + 0x00000000, /* VGT_STRMOUT_CONFIG */ + 0x00000000, /* */ + + 0xc0016900, + 0x000001e0, + 0x00000000, /* CB_BLEND0_CONTROL */ + + 0xc0016900, + 0x000001b1, + 0x00000000, /* SPI_VS_OUT_CONFIG */ + + 0xc0016900, + 0x00000187, + 0x00000000, /* SPI_VS_OUT_ID_0 */ + + 0xc0016900, + 0x00000191, + 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ + + 0xc00b6900, + 0x000001b3, + 0x20000001, /* SPI_PS_IN_CONTROL_0 */ + 0x00000000, /* SPI_PS_IN_CONTROL_1 */ + 0x00000000, /* SPI_INTERP_CONTROL_0 */ + 0x00000000, /* 
SPI_INPUT_Z */ + 0x00000000, /* SPI_FOG_CNTL */ + 0x00100000, /* SPI_BARYC_CNTL */ + 0x00000000, /* SPI_PS_IN_CONTROL_2 */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + + 0xc0036e00, /* SET_SAMPLER */ + 0x00000000, + 0x00000012, + 0x00000000, + 0x00000000, +}; + +const u32 evergreen_vs[] = +{ + 0x00000004, + 0x80800400, + 0x0000a03c, + 0x95000688, + 0x00004000, + 0x15200688, + 0x00000000, + 0x00000000, + 0x3c000000, + 0x67961001, + 0x00080000, + 0x00000000, + 0x1c000000, + 0x67961000, + 0x00000008, + 0x00000000, +}; + +const u32 evergreen_ps[] = +{ + 0x00000003, + 0xa00c0000, + 0x00000008, + 0x80400000, + 0x00000000, + 0x95200688, + 0x00380400, + 0x00146b10, + 0x00380000, + 0x20146b10, + 0x00380400, + 0x40146b00, + 0x80380000, + 0x60146b00, + 0x00000000, + 0x00000000, + 0x00000010, + 0x000d1000, + 0xb0800000, + 0x00000000, +}; + +const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); +const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); +const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h new file mode 100644 index 000000000000..bb8d6c751595 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h @@ -0,0 +1,35 @@ +/* + * Copyright 2009 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef EVERGREEN_BLIT_SHADERS_H +#define EVERGREEN_BLIT_SHADERS_H + +extern const u32 evergreen_ps[]; +extern const u32 evergreen_vs[]; +extern const u32 evergreen_default_state[]; + +extern const u32 evergreen_ps_size, evergreen_vs_size; +extern const u32 evergreen_default_size; + +#endif diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 9b7532dd30f7..319aa9752d40 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -802,6 +802,11 @@ #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c +#define PA_SC_SCREEN_SCISSOR_TL 0x28030 +#define PA_SC_GENERIC_SCISSOR_TL 0x28240 +#define PA_SC_WINDOW_SCISSOR_TL 0x28204 +#define VGT_PRIMITIVE_TYPE 0x8958 + #define DB_DEPTH_CONTROL 0x28800 #define DB_DEPTH_VIEW 0x28008 #define DB_HTILE_DATA_BASE 0x28014 diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 90394df63009..2a4747d9747c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -565,7 +565,7 @@ void r600_blit_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->r600_blit.shader_obj); } -int r600_vb_ib_get(struct radeon_device *rdev) +static int r600_vb_ib_get(struct radeon_device *rdev) { int r; r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); @@ -579,7 +579,7 @@ int r600_vb_ib_get(struct radeon_device *rdev) return 0; } -void r600_vb_ib_put(struct radeon_device *rdev) +static void r600_vb_ib_put(struct radeon_device *rdev) { radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); @@ -683,17 +683,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); - -#if 0 - r600_vb_ib_put(rdev); - - r600_nomm_put_vb(dev); - r600_nomm_get_vb(dev); - if (!dev_priv->blit_vb) - return; - set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); -#endif } vb[0] = i2f(dst_x); @@ -778,17 +767,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); } -#if 0 - if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) { - r600_nomm_put_vb(dev); - r600_nomm_get_vb(dev); - if (!rdev->blit_vb) - return; - - set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); - } -#endif vb[0] = i2f(dst_x / 4); vb[1] = 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8e5cb2c4fa7e..2edd52ece226 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1130,6 +1130,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, int size_bytes); +/* evergreen blit */ +int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); +void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); +void evergreen_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes); static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) { @@ -1471,6 +1477,8 @@ extern void r700_cp_stop(struct radeon_device *rdev); extern void r700_cp_fini(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); extern int evergreen_irq_set(struct radeon_device *rdev); +extern int evergreen_blit_init(struct radeon_device *rdev); +extern void 
evergreen_blit_fini(struct radeon_device *rdev); /* radeon_acpi.c */ #if defined(CONFIG_ACPI) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 25e1dd197791..64fb89ecbf74 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = { .get_vblank_counter = &evergreen_get_vblank_counter, .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, - .copy_blit = NULL, - .copy_dma = NULL, - .copy = NULL, + .copy_blit = &evergreen_copy_blit, + .copy_dma = &evergreen_copy_blit, + .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 6d3b055c02fd..740988244143 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -254,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int r600_cs_parse(struct radeon_cs_parser *p); void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence); int r600_irq_process(struct radeon_device *rdev); int r600_irq_set(struct radeon_device *rdev); bool r600_gpu_is_lockup(struct radeon_device *rdev); @@ -304,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev); bool evergreen_gpu_is_lockup(struct radeon_device *rdev); int evergreen_asic_reset(struct radeon_device *rdev); void evergreen_bandwidth_update(struct radeon_device *rdev); +int evergreen_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_pages, struct radeon_fence *fence); void evergreen_hpd_init(struct radeon_device *rdev); void evergreen_hpd_fini(struct radeon_device *rdev); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); From 4dd19b0dd79c2bfe04a7a19bea0caf9284695cb4 Mon Sep 17 00:00:00 2001 From: Chris Ball Date: Sun, 26 Sep 2010 06:47:23 -0500 Subject: [PATCH 388/476] drm/radeon/kms: Implement KDB debug hooks for radeon KMS. 
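In rough terms, the kernel debugger entry path cannot block, pin buffers or wait for fences, so the existing set_base paths are refactored into *_do_set_base() helpers that take an atomic flag: when set, the framebuffer object is assumed to be already pinned, idle and fenced, and only the base pointers are reprogrammed. The fbdev side is then pointed at the generic fb helper debug hooks; a minimal sketch of the fb_ops wiring added below (other members omitted):

	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
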
Signed-off-by: Chris Ball Signed-off-by: Jason Wessel CC: Jesse Barnes CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 119 ++++++++++++++------ drivers/gpu/drm/radeon/radeon_fb.c | 2 + drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 45 ++++++-- drivers/gpu/drm/radeon/radeon_mode.h | 10 +- 4 files changed, 129 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index cd0290f946cf..2ab9b360d3c9 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -854,13 +854,15 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode } -static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_framebuffer *radeon_fb; + struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; uint64_t fb_location; @@ -868,28 +870,43 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, int r; /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - /* Pin framebuffer & get tilling informations */ + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ obj = radeon_fb->obj; rbo = obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; - r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); - if (unlikely(r != 0)) { - radeon_bo_unreserve(rbo); - return -EINVAL; + + if (atomic) + fb_location = radeon_bo_gpu_offset(rbo); + else { + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + return -EINVAL; + } } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); @@ -909,7 +926,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unsupported screen depth %d\n", - crtc->fb->bits_per_pixel); + target_fb->bits_per_pixel); return -EINVAL; } @@ -955,10 +972,10 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); - WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); - WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); + WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); + WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); 
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); @@ -977,8 +994,8 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, else WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -993,8 +1010,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static int avivo_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -1002,33 +1020,48 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_framebuffer *radeon_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; + struct drm_framebuffer *target_fb; uint64_t fb_location; uint32_t fb_format, fb_pitch_pixels, tiling_flags; int r; /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; rbo = obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; - r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); - if (unlikely(r != 0)) { - radeon_bo_unreserve(rbo); - return -EINVAL; + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + if (atomic) + fb_location = radeon_bo_gpu_offset(rbo); + else { + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + return -EINVAL; + } } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | @@ -1052,7 +1085,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unsupported screen depth %d\n", - crtc->fb->bits_per_pixel); + target_fb->bits_per_pixel); return -EINVAL; } @@ -1093,10 +1126,10 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); - WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); - WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); + WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); + WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); @@ -1115,8 +1148,8 @@ static int avivo_crtc_set_base(struct 
drm_crtc *crtc, int x, int y, else WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -1138,11 +1171,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) - return evergreen_crtc_set_base(crtc, x, y, old_fb); + return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); else if (ASIC_IS_AVIVO(rdev)) - return avivo_crtc_set_base(crtc, x, y, old_fb); + return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); else - return radeon_crtc_set_base(crtc, x, y, old_fb); + return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + + if (ASIC_IS_DCE4(rdev)) + return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); + else if (ASIC_IS_AVIVO(rdev)) + return avivo_crtc_do_set_base(crtc, fb, x, y, 1); + else + return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } /* properly set additional regs when using atombios */ @@ -1311,6 +1359,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .mode_fixup = atombios_crtc_mode_fixup, .mode_set = atombios_crtc_mode_set, .mode_set_base = atombios_crtc_set_base, + .mode_set_base_atomic = atombios_crtc_set_base_atomic, .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, .load_lut = radeon_crtc_load_lut, diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 9cdf6a35bc2c..bc61c5adb56d 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -59,6 +59,8 @@ static struct fb_ops radeonfb_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 305049afde15..bfa090e1f512 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -347,11 +347,26 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) +{ + return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + return radeon_crtc_do_set_base(crtc, fb, x, y, 1); +} + +int radeon_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_framebuffer *radeon_fb; + struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; uint64_t base; @@ -364,14 +379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, DRM_DEBUG_KMS("\n"); /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = 
to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: format = 2; break; @@ -415,10 +437,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_offset_cntl = 0; - pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); - crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) + - ((crtc->fb->bits_per_pixel * 8) - 1)) / - (crtc->fb->bits_per_pixel * 8)); + pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); + crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) + + ((target_fb->bits_per_pixel * 8) - 1)) / + (target_fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; @@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { - int byteshift = crtc->fb->bits_per_pixel >> 4; + int byteshift = target_fb->bits_per_pixel >> 4; int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } } else { int offset = y * pitch_pixels + x; - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: offset *= 1; break; @@ -496,8 +518,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -1040,6 +1062,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { .mode_fixup = radeon_crtc_mode_fixup, .mode_set = radeon_crtc_mode_set, .mode_set_base = radeon_crtc_set_base, + .mode_set_base_atomic = radeon_crtc_set_base_atomic, .prepare = radeon_crtc_prepare, .commit = radeon_crtc_commit, .load_lut = radeon_crtc_load_lut, diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 17a6602b5885..9dd27c23a798 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -514,6 +514,9 @@ extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); +extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y); extern int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -523,7 +526,12 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); - +extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y); +extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic); extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, From be64c2bb4731b0e6223a496eed615b816ac879ec Mon Sep 17 00:00:00 2001 From: Chris Ball Date: Sun, 26 Sep 2010 06:47:24 -0500 Subject: [PATCH 
389/476] drm/nouveau/kms: Implement KDB debug hooks for nouveau KMS. Tested on nv50 and nv04 HW. Signed-off-by: Chris Ball Signed-off-by: Jason Wessel CC: Jesse Barnes CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 6 +++ drivers/gpu/drm/nouveau/nv04_crtc.c | 46 +++++++++++++++++++---- drivers/gpu/drm/nouveau/nv50_crtc.c | 49 ++++++++++++++++++------- 3 files changed, 80 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index d2047713dc59..c5afd146aeb2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static struct fb_ops nv04_fbcon_ops = { @@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static struct fb_ops nv50_fbcon_ops = { @@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 497df8765f28..f5bbd46f76bc 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -768,8 +768,9 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, } static int -nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool atomic) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -780,13 +781,26 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, int arb_burst, arb_lwm; int ret; - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } + else { + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. 
+ */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; - if (old_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(ofb->nvbo); + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } } nv_crtc->fb.offset = fb->nvbo->bo.offset; @@ -834,6 +848,21 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); +} + +static int +nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); +} + static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, struct nouveau_bo *dst) { @@ -962,6 +991,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { .mode_fixup = nv_crtc_mode_fixup, .mode_set = nv_crtc_mode_set, .mode_set_base = nv04_crtc_mode_set_base, + .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, .load_lut = nv_crtc_gamma_load, }; diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index bfd4ca2fe7ef..f41b44864e80 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -491,8 +491,9 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, } static int -nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb, bool update) +nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool update, bool atomic) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = nv_crtc->base.dev; @@ -504,6 +505,28 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } + else { + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. 
+ */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } + } + switch (drm_fb->depth) { case 8: format = NV50_EVO_CRTC_FB_DEPTH_8; @@ -526,15 +549,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (old_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(ofb->nvbo); - } - nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; @@ -685,14 +699,22 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false); nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false); - return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false); + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false); } static int nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { - return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true); + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false); +} + +static int +nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true); } static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { @@ -702,6 +724,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { .mode_fixup = nv50_crtc_mode_fixup, .mode_set = nv50_crtc_mode_set, .mode_set_base = nv50_crtc_mode_set_base, + .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, .load_lut = nv50_crtc_lut_load, }; From 413d45d3627be4748058dea697718ed6fb88bd01 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Sun, 26 Sep 2010 06:47:25 -0500 Subject: [PATCH 390/476] drm, kdb, kms: Add an enter argument to mode_set_base_atomic() API Some devices such as the radeon chips receive information from user space which needs to be saved when executing an atomic mode set operation, else the user space would have to be queried again for the information. This patch extends the mode_set_base_atomic() call to pass an argument to indicate if this is an entry or an exit from an atomic kernel mode set change. Individual drm drivers can properly save and restore state accordingly. 
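As a minimal sketch of the intended driver-side pattern (the foo_* names are placeholders for illustration, not functions added by this series), a hook can now branch on the extra flag:

	static int foo_crtc_set_base_atomic(struct drm_crtc *crtc,
					    struct drm_framebuffer *fb,
					    int x, int y, int enter)
	{
		if (enter)
			foo_save_user_state(crtc);	/* placeholder: stash LUT etc. on kdb entry */
		else
			foo_restore_user_state(crtc);	/* placeholder: put user state back on exit */
		return foo_do_set_base(crtc, fb, x, y, 1);
	}
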
Signed-off-by: Jason Wessel CC: Jesse Barnes CC: David Airlie CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 5 +++-- drivers/gpu/drm/i915/intel_display.c | 4 ++-- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 2 +- drivers/gpu/drm/radeon/atombios_crtc.c | 2 +- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2 +- drivers/gpu/drm/radeon/radeon_mode.h | 4 ++-- include/drm/drm_crtc_helper.h | 3 ++- 8 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6a5e403f9aa1..625a2d551d6a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -263,7 +263,8 @@ int drm_fb_helper_debug_enter(struct fb_info *info) funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, - mode_set->y); + mode_set->y, + 1); } } @@ -309,7 +310,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, - crtc->y); + crtc->y, 0); } return 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 69c54c5a4254..9109c00f3ead 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1492,7 +1492,7 @@ err_unpin: /* Assume fb object is pinned & idle & fenced and just update base pointers */ static int intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, int enter) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1614,7 +1614,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, atomic_read(&obj_priv->pending_flip) == 0); } - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 0); if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index f5bbd46f76bc..fb669dd39c3c 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -858,7 +858,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, static int nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, int enter) { return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index f41b44864e80..727a7a12fed9 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -712,7 +712,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, static int nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, int enter) { return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 2ab9b360d3c9..501e5286ec3f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1180,7 +1180,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, int enter) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c 
b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index bfa090e1f512..8752d3447b72 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -353,7 +353,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, int enter) { return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9dd27c23a798..c4116d3d8d06 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -516,7 +516,7 @@ extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y); + int x, int y, int enter); extern int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -528,7 +528,7 @@ extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y); + int x, int y, int enter); extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 59b7073b13fe..6a9f3935ea0b 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -61,7 +61,8 @@ struct drm_crtc_helper_funcs { int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); int (*mode_set_base_atomic)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y); + struct drm_framebuffer *fb, int x, int y, + int is_enter); /* reload the current crtc LUT */ void (*load_lut)(struct drm_crtc *crtc); From ff773714dd30b802c336064109c535d8b2774e2f Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Sun, 26 Sep 2010 06:47:26 -0500 Subject: [PATCH 391/476] radeon, kdb, kms: Save and restore the LUT on atomic KMS enter/exit When changing VTs non-atomically the kernel works in conjunction with the Xserver in user space and receives the LUT information from the Xserver via a system call. When changing modes atomically for kdb, this information must be saved and restored without disturbing user space as if nothing ever happened. 
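The legacy atomic hook then simply brackets the base-pointer update with the new helpers; in sketch form, mirroring the radeon_legacy_crtc.c hunk below:

	if (enter)
		radeon_crtc_save_lut(crtc);
	else
		radeon_crtc_restore_lut(crtc);
	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
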
Signed-off-by: Jason Wessel CC: Jesse Barnes CC: David Airlie CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 32 +++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 5 ++++ drivers/gpu/drm/radeon/radeon_mode.h | 3 ++ 3 files changed, 40 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b92d2f2fcbed..26935cf2c3b3 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -138,6 +138,38 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) legacy_crtc_load_lut(crtc); } +void radeon_crtc_save_lut(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + int i; + + if (!crtc->enabled) + return; + + for (i = 0; i < 256; i++) { + radeon_crtc->lut_r_copy[i] = radeon_crtc->lut_r[i]; + radeon_crtc->lut_g_copy[i] = radeon_crtc->lut_g[i]; + radeon_crtc->lut_b_copy[i] = radeon_crtc->lut_b[i]; + } +} + +void radeon_crtc_restore_lut(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + int i; + + if (!crtc->enabled) + return; + + for (i = 0; i < 256; i++) { + radeon_crtc->lut_r[i] = radeon_crtc->lut_r_copy[i]; + radeon_crtc->lut_g[i] = radeon_crtc->lut_g_copy[i]; + radeon_crtc->lut_b[i] = radeon_crtc->lut_b_copy[i]; + } + + radeon_crtc_load_lut(crtc); +} + /** Sets the color ramps on behalf of fbcon */ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8752d3447b72..42954785247f 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -355,6 +355,11 @@ int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int enter) { + if (enter) + radeon_crtc_save_lut(crtc); + else + radeon_crtc_restore_lut(crtc); + return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index c4116d3d8d06..2f78615f02aa 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -267,6 +267,7 @@ struct radeon_crtc { struct drm_crtc base; int crtc_id; u16 lut_r[256], lut_g[256], lut_b[256]; + u16 lut_r_copy[256], lut_g_copy[256], lut_b_copy[256]; bool enabled; bool can_tile; uint32_t crtc_offset; @@ -512,6 +513,8 @@ extern int atombios_get_encoder_mode(struct drm_encoder *encoder); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); +extern void radeon_crtc_save_lut(struct drm_crtc *crtc); +extern void radeon_crtc_restore_lut(struct drm_crtc *crtc); extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, From a424d761a00c0233cb7734a8cd572ecd6d0362aa Mon Sep 17 00:00:00 2001 From: Chris Ball Date: Sun, 26 Sep 2010 06:47:27 -0500 Subject: [PATCH 392/476] drm/nouveau/kms: Avoid a hang entering KDB with VT accel on. Francisco Jerez advises that pre-nv20 cards would hang if we entered kdb with accel on and IRQs disabled, so we now disable accel before entering kdb and re-enable it on the way back out. 
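The fix reuses the atomic set_base hook added earlier in this series; in sketch form, matching the nv04_crtc.c hunk below:

	if (enter)
		nouveau_fbcon_save_disable_accel(dev);
	else
		nouveau_fbcon_restore_accel(dev);
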
Reported-by: Francisco Jerez Signed-off-by: Chris Ball Signed-off-by: Jason Wessel Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nv04_crtc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index fb669dd39c3c..427f90e17b9d 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -33,6 +33,7 @@ #include "nouveau_fb.h" #include "nouveau_hw.h" #include "nvreg.h" +#include "nouveau_fbcon.h" static int nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, @@ -860,6 +861,14 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int enter) { + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = dev_priv->dev; + + if (enter) + nouveau_fbcon_save_disable_accel(dev); + else + nouveau_fbcon_restore_accel(dev); + return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); } From 2126d0a4a205e2d6b763434f892524cd60f74228 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 6 Oct 2010 00:13:04 -0400 Subject: [PATCH 393/476] drm/radeon/kms: make sure blit addr masks are 64 bit Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 8 ++++---- drivers/gpu/drm/radeon/r600_blit_kms.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index ce1ae4a2aa54..a9825aa324b4 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -611,8 +611,8 @@ void evergreen_kms_blit_copy(struct radeon_device *rdev, int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); @@ -695,8 +695,8 @@ void evergreen_kms_blit_copy(struct radeon_device *rdev, int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 2a4747d9747c..39d566dbabfa 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -661,8 +661,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); @@ -744,8 +744,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); From c2873e9633fe908dccd36dbb1d370e9c59a1ca62 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 7 Oct 2010 09:20:12 +0100 Subject: [PATCH 394/476] drm/i915: Free hardware status page on unload when 
physically mapped A physically mapped hardware status page is allocated at driver load time but was never freed. Call the existing code to free this page at driver unload time on hardware which uses this kind. Signed-off-by: Keith Packard [ickle: call before tearing down registers on KMS-only path, as pointed out by Dave Airlie] Signed-off-by: Chris Wilson Cc: stable@kernel.org --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 726c3736082f..3bbe72352cd8 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2150,6 +2150,9 @@ int i915_driver_unload(struct drm_device *dev) drm_mm_takedown(&dev_priv->mm.vram); intel_cleanup_overlay(dev); + + if (!I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); } intel_teardown_gmbus(dev); From e59f2bac15042eb744851bcf866f18dadc3091c6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 7 Oct 2010 17:28:15 +0100 Subject: [PATCH 395/476] drm/i915: Wait for pending flips on the GPU Currently, if a batch buffer refers to an object with a pending flip, then we sleep until that pending flip is completed (unpinned and signalled). This is so that a flip can be queued and the user can continue rendering to the backbuffer oblivious to whether the buffer is still pinned as the scan out. (The kernel arbitrating at the last moment to stall the batch and wait until the buffer is unpinned and replaced as the front buffer.) As we only have a queue depth of 1, we can simply wait for the current pending flip to complete and continue rendering. We can achieve this with a single WAIT_FOR_EVENT command inserted into the ring buffer prior to executing the batch, *without* stalling the client. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 75 +++++++++++----------------- drivers/gpu/drm/i915/intel_display.c | 7 +-- 2 files changed, 34 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 100a7537980e..72ab3032300a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3646,41 +3646,6 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, return 0; } -static int -i915_gem_wait_for_pending_flip(struct drm_device *dev, - struct drm_gem_object **object_list, - int count) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - DEFINE_WAIT(wait); - int i, ret = 0; - - for (;;) { - prepare_to_wait(&dev_priv->pending_flip_queue, - &wait, TASK_INTERRUPTIBLE); - for (i = 0; i < count; i++) { - obj_priv = to_intel_bo(object_list[i]); - if (atomic_read(&obj_priv->pending_flip) > 0) - break; - } - if (i == count) - break; - - if (!signal_pending(current)) { - mutex_unlock(&dev->struct_mutex); - schedule(); - mutex_lock(&dev->struct_mutex); - continue; - } - ret = -ERESTARTSYS; - break; - } - finish_wait(&dev_priv->pending_flip_queue, &wait); - - return ret; -} - static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv, @@ -3773,7 +3738,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Look up object handles */ - flips = 0; for (i = 0; i < args->buffer_count; i++) { object_list[i] = drm_gem_object_lookup(dev, file_priv, exec_list[i].handle); @@ -3796,14 +3760,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } obj_priv->in_execbuffer = true; - flips += atomic_read(&obj_priv->pending_flip); - } - - if (flips > 
0) { - ret = i915_gem_wait_for_pending_flip(dev, object_list, - args->buffer_count); - if (ret) - goto err; } /* Pin and relocate */ @@ -3943,9 +3899,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ~0); #endif + /* Check for any pending flips. As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. + */ + flips = 0; + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]->write_domain) + flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); + } + if (flips) { + int plane, flip_mask; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, + MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); + } + } + /* Exec the batchbuffer */ ret = ring->dispatch_gem_execbuffer(dev, ring, args, - cliprects, exec_offset); + cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9109c00f3ead..7fe92d06eb26 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4994,8 +4994,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, obj_priv = to_intel_bo(work->pending_flip_obj); /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) + atomic_clear_mask(1 << intel_crtc->plane, + &obj_priv->pending_flip.counter); + if (atomic_read(&obj_priv->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); @@ -5092,7 +5093,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_objs; obj_priv = to_intel_bo(obj); - atomic_inc(&obj_priv->pending_flip); + atomic_add(1 << intel_crtc->plane, &obj_priv->pending_flip); work->pending_flip_obj = obj; if (IS_GEN3(dev) || IS_GEN2(dev)) { From cfcb0fc9c2f2decf065e9a6a1c622541e8b4090b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:06 -0700 Subject: [PATCH 396/476] drm/i915/dp: convert eDP checks to functions and document Most of the PCH eDP checks are redundant, so document the functions in preparation for removing most of the calls. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 92 ++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 152d94507b79..f2810ade343c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -42,9 +42,6 @@ #define DP_LINK_CONFIGURATION_SIZE 9 -#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) -#define IS_PCH_eDP(i) ((i)->is_pch_edp) - struct intel_dp { struct intel_encoder base; uint32_t output_reg; @@ -62,6 +59,31 @@ struct intel_dp { uint8_t link_status[DP_LINK_STATUS_SIZE]; }; +/** + * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. 
+ */ +static bool is_edp(struct intel_dp *intel_dp) +{ + return intel_dp->base.type == INTEL_OUTPUT_EDP; +} + +/** + * is_pch_edp - is the port on the PCH and attached to an eDP panel? + * @intel_dp: DP struct + * + * Returns true if the given DP struct corresponds to a PCH DP port attached + * to an eDP panel, false otherwise. Helpful for determining whether we + * may need FDI resources for a given DP output or not. + */ +static bool is_pch_edp(struct intel_dp *intel_dp) +{ + return intel_dp->is_pch_edp; +} + static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return container_of(encoder, struct intel_dp, base.base); @@ -138,7 +160,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; @@ -160,7 +182,7 @@ intel_dp_mode_valid(struct drm_connector *connector, int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && + if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; @@ -171,7 +193,7 @@ intel_dp_mode_valid(struct drm_connector *connector, /* only refuse the mode on non eDP since we have seen some wierd eDP panels which are outside spec tolerances but somehow work by magic */ - if (!IS_eDP(intel_dp) && + if (!is_edp(intel_dp) && (intel_dp_link_required(connector->dev, intel_dp, mode->clock) > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; @@ -258,7 +280,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * Note that PCH attached eDP panels should use a 125MHz input * clock divider. */ - if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else @@ -530,7 +552,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && + if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && dev_priv->panel_fixed_mode) { intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, @@ -560,7 +582,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { /* okay we failed just pick the highest */ intel_dp->lane_count = max_lane_count; intel_dp->link_bw = bws[max_clock]; @@ -652,7 +674,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; - if (IS_PCH_eDP(intel_dp)) + if (is_pch_edp(intel_dp)) bpp = dev_priv->edp.bpp; break; } @@ -720,7 +742,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; else intel_dp->DP |= DP_LINK_TRAIN_OFF; @@ -755,7 +777,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) intel_dp->DP |= DP_PIPEB_SELECT; - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) @@ -909,7 +931,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { ironlake_edp_panel_off(dev); ironlake_edp_backlight_off(dev); ironlake_edp_panel_vdd_on(dev); @@ -926,12 +948,12 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_panel_on(dev); intel_dp_complete_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_backlight_on(dev); intel_dp->dpms_mode = DRM_MODE_DPMS_ON; } @@ -945,21 +967,21 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_off(dev); } if (dp_reg & DP_PORT_EN) intel_dp_link_down(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_panel_on(dev); intel_dp_complete_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_backlight_on(dev); } } @@ -1234,7 +1256,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && 
!is_edp(intel_dp)) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; @@ -1245,7 +1267,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { + if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { @@ -1253,7 +1275,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; @@ -1312,7 +1334,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { + if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { @@ -1320,7 +1342,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; @@ -1348,7 +1370,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; @@ -1368,14 +1390,14 @@ intel_dp_link_down(struct intel_dp *intel_dp) DRM_DEBUG_KMS("\n"); - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); udelay(100); } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); } else { @@ -1386,7 +1408,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); @@ -1425,7 +1447,7 @@ ironlake_dp_detect(struct drm_connector *connector) enum drm_connector_status status; /* Panel needs power for AUX to work */ - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_panel_vdd_on(connector->dev); status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, @@ -1437,7 +1459,7 @@ ironlake_dp_detect(struct drm_connector *connector) } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) ironlake_edp_panel_vdd_off(connector->dev); return status; } @@ -1504,7 +1526,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && + if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, 
@@ -1521,7 +1543,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1651,7 +1673,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { @@ -1672,7 +1694,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); intel_encoder->crtc_mask = (1 << 0) | (1 << 1); @@ -1719,7 +1741,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = From 4d9264615b98fe8015eca7d84a9862b1489c69d4 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:07 -0700 Subject: [PATCH 397/476] drm/i915/dp: remove redundant is_pch_edp checks If is_edp is true, is_pch_edp will always be true. So limit the calls to the latter function to places where the distinction actually matters. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 36 +++++++++++++++------------------ 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f2810ade343c..1b736637e13e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -160,7 +160,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi { struct drm_i915_private *dev_priv = dev->dev_private; - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; @@ -182,8 +182,7 @@ intel_dp_mode_valid(struct drm_connector *connector, int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); - if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; @@ -552,8 +551,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); @@ -582,7 +580,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { + if (is_edp(intel_dp)) { /* okay we failed just pick the highest */ intel_dp->lane_count = max_lane_count; intel_dp->link_bw = bws[max_clock]; @@ -931,7 +929,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { + if (is_edp(intel_dp)) { ironlake_edp_panel_off(dev); ironlake_edp_backlight_off(dev); ironlake_edp_panel_vdd_on(dev); @@ -948,14 +946,13 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_panel_on(dev); intel_dp_complete_link_train(intel_dp); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); - intel_dp->dpms_mode = DRM_MODE_DPMS_ON; } static void @@ -967,21 +964,21 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { + if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_off(dev); } if (dp_reg & DP_PORT_EN) intel_dp_link_down(intel_dp); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_panel_on(dev); intel_dp_complete_link_train(intel_dp); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); } } @@ -1447,7 +1444,7 @@ ironlake_dp_detect(struct drm_connector *connector) enum drm_connector_status status; /* Panel needs power for AUX to work */ - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_panel_vdd_on(connector->dev); status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, @@ -1459,7 +1456,7 @@ ironlake_dp_detect(struct drm_connector *connector) } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_panel_vdd_off(connector->dev); return status; } @@ -1526,8 +1523,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { - if ((is_edp(intel_dp) || is_pch_edp(intel_dp)) && - !dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, head) { @@ -1543,7 +1539,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (is_edp(intel_dp) || is_pch_edp(intel_dp)) { + if (is_edp(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; 
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1741,7 +1737,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A || is_pch_edp(intel_dp)) { + if (is_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = From 51190667b3c6927356e594cdf6955980ff47bb16 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:08 -0700 Subject: [PATCH 398/476] drm/i915/dp: correct eDP lane count and bpp With the old check we'd never set lane_count or bpp to different values on PCH attached eDP panels. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1b736637e13e..714e553960fd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -672,8 +672,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; - if (is_pch_edp(intel_dp)) - bpp = dev_priv->edp.bpp; + break; + } else if (is_edp(intel_dp)) { + lane_count = dev_priv->edp.lanes; + bpp = dev_priv->edp.bpp; break; } } From 814948adec172dbc41252b1815e4e83aedfe91b9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:09 -0700 Subject: [PATCH 399/476] drm/i915: add eDP checking functions for the display code The display code needs to distinguish between CPU and PCH attached eDP panels, so add some helpers to handle that. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 19 +++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 1 + 2 files changed, 20 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 714e553960fd..da71263f6fab 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -95,6 +95,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) struct intel_dp, base); } +/** + * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? + * @encoder: DRM encoder + * + * Return true if @encoder corresponds to a PCH attached eDP panel. Needed + * by intel_display.c. 
+ */ +bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp; + + if (!encoder) + return false; + + intel_dp = enc_to_intel_dp(encoder); + + return is_pch_edp(intel_dp); +} + static void intel_dp_start_link_train(struct intel_dp *intel_dp); static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40e99bf27ff7..c946c48b472f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -212,6 +212,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, extern bool intel_pch_has_edp(struct drm_crtc *crtc); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); +extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, From 1d85036278f1b3eb3b7c5db805e5c4c847d1415d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:10 -0700 Subject: [PATCH 400/476] drm/i915: remove broken intel_pch_has_edp function Since we set the output type of PCH attached eDP panels to INTEL_OUTPUT_eDP this function would never return true when it should. It's been replaced by working functions. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 5 ++--- drivers/gpu/drm/i915/intel_dp.c | 19 ------------------- drivers/gpu/drm/i915/intel_drv.h | 1 - 3 files changed, 2 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7fe92d06eb26..0ef52db4685f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2001,8 +2001,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || HAS_eDP || intel_pch_has_edp(crtc))) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
@@ -3717,7 +3716,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { + } else if (has_edp_encoder) { switch (dev_priv->edp.bpp/3) { case 8: temp |= PIPE_8BPC; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index da71263f6fab..57bfc3e7b40a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -648,25 +648,6 @@ intel_dp_compute_m_n(int bpp, intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } -bool intel_pch_has_edp(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_dp *intel_dp; - - if (encoder->crtc != crtc) - continue; - - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) - return intel_dp->is_pch_edp; - } - return false; -} - void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c946c48b472f..0581e5e5ac55 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -209,7 +209,6 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern bool intel_pch_has_edp(struct drm_crtc *crtc); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); From 5c5313c8db9bfb549e080fc4cb0a4c3c2aa7a73d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:11 -0700 Subject: [PATCH 401/476] drm/i915: fix CPU vs PCH eDP confusion FDI training needs to done and idle for PCH eDP and before we turn the pipes on, and various eDP checks need to account for PCH attached eDP. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 67 +++++++++++++--------------- 1 file changed, 32 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0ef52db4685f..4c44e1663a95 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -932,10 +932,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; intel_clock_t clock; - /* return directly when it is eDP */ - if (HAS_eDP) - return true; - if (target < 200000) { clock.n = 1; clock.p1 = 2; @@ -1763,6 +1759,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); + + /* enable normal train */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + + /* wait one idle pattern time */ + POSTING_READ(reg); + udelay(1000); } static const int const snb_b_fdi_train_param [] = { @@ -2065,28 +2083,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); - /* enable normal train */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; - I915_WRITE(reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - - /* wait one idle pattern time */ - POSTING_READ(reg); - udelay(100); - /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { @@ -3683,16 +3679,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { int lane = 0, link_bw, bpp; - /* eDP doesn't require FDI link, so just set DP M/N + /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { target_clock = mode->clock; intel_edp_link_config(has_edp_encoder, &lane, &link_bw); } else { - /* DP over FDI requires target mode clock + /* [e]DP over FDI requires target mode clock instead of link clock */ - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) target_clock = mode->clock; else target_clock = adjusted_mode->clock; @@ -3932,7 +3928,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll_reg = DPLL(pipe); } - if (!has_edp_encoder) { + /* PCH eDP needs FDI, but CPU eDP does not */ + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); @@ -4009,9 +4006,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 
intel_dp_set_m_n(crtc, mode, adjusted_mode); - else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev)) { /* For non-DP output, clear any trans DP clock recovery setting.*/ if (pipe == 0) { I915_WRITE(TRANSA_DATA_M1, 0); @@ -4026,7 +4023,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (!has_edp_encoder) { + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); @@ -4120,7 +4117,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); } else { /* enable FDI RX PLL too */ From 01cb9ea633ddf3e8770dfe7851e88610087098bc Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:12 -0700 Subject: [PATCH 402/476] drm/i915/dp: eDP power sequencing fixes Enable the panel before adjusting eDP link params, make sure the panel is idle after powering it on before proceeding with other activity, delay backlight enable to avoid visible flicker. Also avoid using VDD per hw team recommendation; it can conflict with the builtin panel power sequencing logic and lead to panel power sequencing failures. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 3 ++ drivers/gpu/drm/i915/intel_dp.c | 77 ++++++++++++++------------------- 2 files changed, 35 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d02de212e6ad..a72335e940f1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1373,6 +1373,9 @@ #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) +#define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 57bfc3e7b40a..944dfe199f6e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -788,10 +788,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } /* Returns true if the panel was already on when called */ -static bool ironlake_edp_panel_on (struct drm_device *dev) +static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) { + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; if (I915_READ(PCH_PP_STATUS) & PP_ON) return true; @@ -803,19 +804,20 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - pp |= POWER_TARGET_ON; + pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); /* Ouch. 
We need to wait here for some panels, like Dell e6510 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i */ msleep(300); - if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, + 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - pp &= ~(PANEL_UNLOCK_REGS); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -826,7 +828,8 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) static void ironlake_edp_panel_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | + PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; pp = I915_READ(PCH_PP_CONTROL); @@ -837,12 +840,12 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000)) + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) DRM_ERROR("panel off wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - /* Make sure VDD is enabled so DP AUX will work */ pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -853,36 +856,19 @@ static void ironlake_edp_panel_off (struct drm_device *dev) msleep(300); } -static void ironlake_edp_panel_vdd_on(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; - - pp = I915_READ(PCH_PP_CONTROL); - pp |= EDP_FORCE_VDD; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(300); -} - -static void ironlake_edp_panel_vdd_off(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; - - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~EDP_FORCE_VDD; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(300); -} - static void ironlake_edp_backlight_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; DRM_DEBUG_KMS("\n"); + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. 
+ */ + msleep(300); pp = I915_READ(PCH_PP_CONTROL); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); @@ -932,10 +918,12 @@ static void intel_dp_prepare(struct drm_encoder *encoder) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (is_edp(intel_dp)) { - ironlake_edp_panel_off(dev); ironlake_edp_backlight_off(dev); - ironlake_edp_panel_vdd_on(dev); - ironlake_edp_pll_on(encoder); + ironlake_edp_panel_on(intel_dp); + if (!is_pch_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + else + ironlake_edp_pll_off(encoder); } if (dp_reg & DP_PORT_EN) intel_dp_link_down(intel_dp); @@ -949,7 +937,7 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); if (is_edp(intel_dp)) - ironlake_edp_panel_on(dev); + ironlake_edp_panel_on(intel_dp); intel_dp_complete_link_train(intel_dp); @@ -966,19 +954,19 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (is_edp(intel_dp)) { + if (is_edp(intel_dp)) ironlake_edp_backlight_off(dev); - ironlake_edp_panel_off(dev); - } if (dp_reg & DP_PORT_EN) intel_dp_link_down(intel_dp); if (is_edp(intel_dp)) + ironlake_edp_panel_off(dev); + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { if (!(dp_reg & DP_PORT_EN)) { - intel_dp_start_link_train(intel_dp); if (is_edp(intel_dp)) - ironlake_edp_panel_on(dev); + ironlake_edp_panel_on(intel_dp); + intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); @@ -1445,9 +1433,10 @@ ironlake_dp_detect(struct drm_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); enum drm_connector_status status; - /* Panel needs power for AUX to work */ + /* Can't disconnect eDP */ if (is_edp(intel_dp)) - ironlake_edp_panel_vdd_on(connector->dev); + return connector_status_connected; + status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, @@ -1458,8 +1447,6 @@ ironlake_dp_detect(struct drm_connector *connector) } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - if (is_edp(intel_dp)) - ironlake_edp_panel_vdd_off(connector->dev); return status; } From 723bfd707a97fee06eb3ba4d3e8b4714c29a1064 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:13 -0700 Subject: [PATCH 403/476] drm/i915: add _DSM support The _DSM method on the integrated graphics device can tell us which connectors are muxable, so add support for making the call and parsing out the connector info. 
Signed-off-by: Jesse Barnes [ickle: fix compiler warnings for using uninitialized 'result' and downgrade error message for non-switchable devices] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/Makefile | 2 + drivers/gpu/drm/i915/i915_dma.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 9 + drivers/gpu/drm/i915/intel_acpi.c | 286 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 3 + 5 files changed, 302 insertions(+) create mode 100644 drivers/gpu/drm/i915/intel_acpi.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index f6e98dd416c9..fdc833d5cc7b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -35,6 +35,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915-$(CONFIG_COMPAT) += i915_ioc32.o +i915-$(CONFIG_ACPI) += intel_acpi.o + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3bbe72352cd8..f451af69d437 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1244,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) goto cleanup_ringbuffer; + intel_register_dsm_handler(); + ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, i915_switcheroo_can_switch); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 73ad8bff2c2a..e4ffcd3a7aef 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1136,6 +1136,15 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif +/* intel_acpi.c */ +#ifdef CONFIG_ACPI +extern void intel_register_dsm_handler(void); +extern void intel_unregister_dsm_handler(void); +#else +static inline void intel_register_dsm_handler(void) { return; } +static inline void intel_unregister_dsm_handler(void) { return; } +#endif /* CONFIG_ACPI */ + /* modesetting */ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 000000000000..65c88f9ba12c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -0,0 +1,286 @@ +/* + * Intel ACPI functions + * + * _DSM related code stolen from nouveau_acpi.c. + */ +#include +#include +#include +#include + +#include "drmP.h" + +#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... 
*/ + +#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ +#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ + +static struct intel_dsm_priv { + acpi_handle dhandle; +} intel_dsm_priv; + +static const u8 intel_dsm_guid[] = { + 0xd3, 0x73, 0xd8, 0x7e, + 0xd0, 0xc2, + 0x4f, 0x4e, + 0xa8, 0x54, + 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c +}; + +static int intel_dsm(acpi_handle handle, int func, int arg) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + u32 result; + int ret = 0; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + ret = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + return ret; + } + + obj = (union acpi_object *)output.pointer; + + result = 0; + switch (obj->type) { + case ACPI_TYPE_INTEGER: + result = obj->integer.value; + break; + + case ACPI_TYPE_BUFFER: + if (obj->buffer.length == 4) { + result =(obj->buffer.pointer[0] | + (obj->buffer.pointer[1] << 8) | + (obj->buffer.pointer[2] << 16) | + (obj->buffer.pointer[3] << 24)); + break; + } + default: + ret = -EINVAL; + break; + } + if (result == 0x80000002) + ret = -ENODEV; + + kfree(output.pointer); + return ret; +} + +static char *intel_dsm_port_name(u8 id) +{ + switch (id) { + case 0: + return "Reserved"; + case 1: + return "Analog VGA"; + case 2: + return "LVDS"; + case 3: + return "Reserved"; + case 4: + return "HDMI/DVI_B"; + case 5: + return "HDMI/DVI_C"; + case 6: + return "HDMI/DVI_D"; + case 7: + return "DisplayPort_A"; + case 8: + return "DisplayPort_B"; + case 9: + return "DisplayPort_C"; + case 0xa: + return "DisplayPort_D"; + case 0xb: + case 0xc: + case 0xd: + return "Reserved"; + case 0xe: + return "WiDi"; + default: + return "bad type"; + } +} + +static char *intel_dsm_mux_type(u8 type) +{ + switch (type) { + case 0: + return "unknown"; + case 1: + return "No MUX, iGPU only"; + case 2: + return "No MUX, dGPU only"; + case 3: + return "MUXed between iGPU and dGPU"; + default: + return "bad type"; + } +} + +static void intel_dsm_platform_mux_info(void) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *pkg; + int i, ret; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = 0; + + ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, + &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + goto out; + } + + pkg = (union acpi_object *)output.pointer; + + if (pkg->type == ACPI_TYPE_PACKAGE) { + union acpi_object *connector_count = &pkg->package.elements[0]; + DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", + (unsigned long 
long)connector_count->integer.value); + for (i = 1; i < pkg->package.count; i++) { + union acpi_object *obj = &pkg->package.elements[i]; + union acpi_object *connector_id = + &obj->package.elements[0]; + union acpi_object *info = &obj->package.elements[1]; + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", + (unsigned long long)connector_id->integer.value); + DRM_DEBUG_DRIVER(" port id: %s\n", + intel_dsm_port_name(info->buffer.pointer[0])); + DRM_DEBUG_DRIVER(" display mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[1])); + DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[2])); + DRM_DEBUG_DRIVER(" hpd mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[3])); + } + } else { + DRM_ERROR("MUX INFO call failed\n"); + } + +out: + kfree(output.pointer); +} + +static int intel_dsm_switchto(enum vga_switcheroo_client_id id) +{ + return 0; +} + +static int intel_dsm_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + return 0; +} + +static int intel_dsm_init(void) +{ + return 0; +} + +static int intel_dsm_get_client_id(struct pci_dev *pdev) +{ + if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler intel_dsm_handler = { + .switchto = intel_dsm_switchto, + .power_state = intel_dsm_power_state, + .init = intel_dsm_init, + .get_client_id = intel_dsm_get_client_id, +}; + +static bool intel_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, intel_handle; + acpi_status status; + int ret; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_DSM", &intel_handle); + if (ACPI_FAILURE(status)) { + DRM_DEBUG_KMS("no _DSM method for intel device\n"); + return false; + } + + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + if (ret < 0) { + DRM_ERROR("failed to get supported _DSM functions\n"); + return false; + } + + intel_dsm_priv.dhandle = dhandle; + + intel_dsm_platform_mux_info(); + return true; +} + +static bool intel_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + bool has_dsm = false; + int vga_count = 0; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + has_dsm |= intel_dsm_pci_probe(pdev); + } + + if (vga_count == 2 && has_dsm) { + acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + return true; + } + + return false; +} + +void intel_register_dsm_handler(void) +{ + if (!intel_dsm_detect()) + return; + + vga_switcheroo_register_handler(&intel_dsm_handler); +} + +void intel_unregister_dsm_handler(void) +{ + vga_switcheroo_unregister_handler(); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c44e1663a95..349710a8014c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6127,6 +6127,9 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); + intel_unregister_dsm_handler(); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) From 9f0e7ff4b366d27570cbe0ffa137ed1018009114 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 
2010 16:01:14 -0700 Subject: [PATCH 404/476] drm/i915: fetch eDP configuration data from the VBT We need to use some of these values in eDP configurations, so be sure to fetch them and store them in the i915 private structure. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 16 ++++----- drivers/gpu/drm/i915/intel_bios.c | 60 +++++++++++++++++++++++++------ include/drm/drm_dp_helper.h | 3 ++ 3 files changed, 60 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e4ffcd3a7aef..6d49a9f5c2b1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -339,16 +339,16 @@ typedef struct drm_i915_private { unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; int lvds_ssc_freq; - struct { - u8 rate:4; - u8 lanes:4; - u8 preemphasis:4; - u8 vswing:4; + int rate; + int lanes; + int preemphasis; + int vswing; - u8 initialized:1; - u8 support:1; - u8 bpp:6; + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; } edp; struct notifier_block lid_notifier; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b1f73ac0f3fd..cc15447eff41 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,6 +24,7 @@ * Eric Anholt * */ +#include #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -413,6 +414,8 @@ static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; + struct edp_power_seq *edp_pps; + struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { @@ -437,19 +440,54 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) break; } - dev_priv->edp.rate = edp->link_params[panel_type].rate; - dev_priv->edp.lanes = edp->link_params[panel_type].lanes; - dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis; - dev_priv->edp.vswing = edp->link_params[panel_type].vswing; + /* Get the eDP sequencing and link info */ + edp_pps = &edp->power_seqs[panel_type]; + edp_link_params = &edp->link_params[panel_type]; - DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n", - dev_priv->edp.bpp, - dev_priv->edp.rate, - dev_priv->edp.lanes, - dev_priv->edp.preemphasis, - dev_priv->edp.vswing); + dev_priv->edp.pps = *edp_pps; - dev_priv->edp.initialized = true; + dev_priv->edp.rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : + DP_LINK_BW_1_62; + switch (edp_link_params->lanes) { + case 0: + dev_priv->edp.lanes = 1; + break; + case 1: + dev_priv->edp.lanes = 2; + break; + case 3: + default: + dev_priv->edp.lanes = 4; + break; + } + switch (edp_link_params->preemphasis) { + case 0: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; + break; + case 1: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; + break; + case 2: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; + break; + case 3: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; + break; + } + switch (edp_link_params->vswing) { + case 0: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; + break; + case 1: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; + break; + case 2: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; + break; + case 3: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; + break; + } } static void diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index a49e791db0b0..83a389e44543 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -23,6 +23,9 @@ #ifndef _DRM_DP_HELPER_H_ #define _DRM_DP_HELPER_H_ +#include +#include + /* From the VESA DisplayPort spec */ #define AUX_NATIVE_WRITE 0x8 From 5b2adf897146edeac6a1e438fb67b5a53dbbdf34 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:15 -0700 Subject: [PATCH 405/476] drm/i915: add Ironlake clock gating workaround for FDI link training Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a72335e940f1..5a22887a5381 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2782,6 +2782,7 @@ #define FDI_RXA_CHICKEN 0xc200c #define FDI_RXB_CHICKEN 0xc2010 #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) +#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 349710a8014c..5812fc7c5a0f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1714,6 +1714,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); + /* Ironlake workaround, enable clock pointer after FDI enable*/ + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = I915_READ(reg); @@ -2192,6 +2195,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) POSTING_READ(reg); udelay(100); + /* Ironlake workaround, disable clock pointer after downing FDI */ + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); + /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); From 7f8232826842b27525857615262f50fe66c84dd7 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:16 -0700 Subject: [PATCH 406/476] drm/i915: fix PCH eDP SSC support Enable SSC on PCH eDP if possible. 
Signed-off-by: Jesse Barnes [ickle: added a posting read of PCH_DREF_CONTROL before the udelay] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5812fc7c5a0f..d7d59006a846 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3796,13 +3796,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, POSTING_READ(PCH_DREF_CONTROL); udelay(200); + } + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + /* Enable CPU source on CPU attached eDP */ + if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (dev_priv->lvds_use_ssc) + temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else { - temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + /* Enable SSC on PCH eDP if needed */ + if (dev_priv->lvds_use_ssc) { + DRM_ERROR("enabling SSC on PCH\n"); + temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; + } } I915_WRITE(PCH_DREF_CONTROL, temp); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); } } From 1cb1b75e5e0120cc35c4a9420ce366b84e0cf951 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:17 -0700 Subject: [PATCH 407/476] drm/i915: use 120MHz refclk in PCH eDP case too CPU eDP needs a different reference clock than PCH eDP, which uses the standard PCH refclk of 120MHz. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d7d59006a846..5f00632d6fcc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3625,7 +3625,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk / 1000); } else if (!IS_GEN2(dev)) { refclk = 96000; - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev) && + (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; From 83240120878805b537a2efeaabff92798140b7cf Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:18 -0700 Subject: [PATCH 408/476] drm/i915: use DPLL_DVO_HIGH_SPEED for PCH eDP As with other PCH DP connections. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5f00632d6fcc..acd7180ba7c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3850,7 +3850,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } dpll |= DPLL_DVO_HIGH_SPEED; } - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ From 17f6766c622e03a938f767b49399a68107aef537 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:19 -0700 Subject: [PATCH 409/476] drm/i915: fix ironlake CRTC enable/disable Wait for vblank after enabling a pipe, make the error messages more informative, and wait for the pipe to turn off when we disable it. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index acd7180ba7c7..29ecaa0b1344 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2041,7 +2041,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if ((temp & PIPECONF_ENABLE) == 0) { I915_WRITE(reg, temp | PIPECONF_ENABLE); POSTING_READ(reg); - udelay(100); + intel_wait_for_vblank(dev, intel_crtc->pipe); } /* configure and enable CPU plane */ @@ -2131,7 +2131,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; I915_WRITE(reg, temp | TRANS_ENABLE); if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder\n"); + DRM_ERROR("failed to enable transcoder %d\n", pipe); intel_crtc_load_lut(crtc); intel_update_fbc(dev); @@ -2171,9 +2171,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) temp = I915_READ(reg); if (temp & PIPECONF_ENABLE) { I915_WRITE(reg, temp & ~PIPECONF_ENABLE); + POSTING_READ(reg); /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50)) - DRM_ERROR("failed to turn off cpu pipe\n"); + intel_wait_for_pipe_off(dev, intel_crtc->pipe); } /* Disable PF */ From 8088699f029b2a27af9bc5431ef7542c84195760 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:20 -0700 Subject: [PATCH 410/476] drm/i915: don't program FDI RX/TX in mode_set We do this later (and more properly) when we enable FDI, so we don't need to do it here. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 29ecaa0b1344..89cfe4684147 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4140,27 +4140,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); - } else { - /* enable FDI RX PLL too */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); - - POSTING_READ(reg); - udelay(200); - - /* enable FDI TX PLL too */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); - - /* enable FDI RX PCDCLK */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_PCDCLK); - - POSTING_READ(reg); - udelay(200); } } From 896673836b8c55b75e7d7d2741aaaadff0c6a038 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:21 -0700 Subject: [PATCH 411/476] drm/i915/dp: cache eDP DPCD data Cache the first 4 bytes of DPCD data in the eDP case. It's unlikely to change and can save us some trouble at link training time. 
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_dp.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6d49a9f5c2b1..84e33aeececd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -350,6 +350,7 @@ typedef struct drm_i915_private { int bpp; struct edp_power_seq pps; } edp; + bool no_aux_handshake; struct notifier_block lid_notifier; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 944dfe199f6e..7fa828275e4a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1724,6 +1724,26 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp_i2c_init(intel_dp, intel_connector, name); + /* Cache some DPCD data in the eDP case */ + if (is_edp(intel_dp)) { + int ret; + bool was_on; + + was_on = ironlake_edp_panel_on(intel_dp); + ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + if (ret == sizeof(intel_dp->dpcd)) { + if (intel_dp->dpcd[0] >= 0x11) + dev_priv->no_aux_handshake = intel_dp->dpcd[3] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; + } else { + DRM_ERROR("failed to retrieve link info\n"); + } + if (!was_on) + ironlake_edp_panel_off(dev); + } + intel_encoder->hot_plug = intel_dp_hot_plug; if (is_edp(intel_dp)) { From 869184a675662bddcdf76c5b95665272facff2b8 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:22 -0700 Subject: [PATCH 412/476] drm/i915/dp: use VBT provided eDP params if available We can skip most of the link training step if we use the VBT provided values. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 154 +++++++++++++++++++------------- 1 file changed, 93 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 7fa828275e4a..d4ef20598da8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -581,6 +581,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = dev_priv->panel_fixed_mode->clock; } + /* Just use VBT values for eDP */ + if (is_edp(intel_dp)) { + intel_dp->lane_count = dev_priv->edp.lanes; + intel_dp->link_bw = dev_priv->edp.rate; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + return true; + } + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -599,19 +610,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (is_edp(intel_dp)) { - /* okay we failed just pick the highest */ - intel_dp->lane_count = max_lane_count; - intel_dp->link_bw = bws[max_clock]; - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Force picking display port link bw %02x lane " - "count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - - return true; - } - return false; } @@ -1088,11 +1086,21 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(uint8_t train_set, int lane_count) +intel_dp_signal_levels(struct intel_dp *intel_dp) { 
- uint32_t signal_levels = 0; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t signal_levels = 0; + u8 train_set = intel_dp->train_set[0]; + u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; + u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; - switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + if (is_edp(intel_dp)) { + vswing = dev_priv->edp.vswing; + preemphasis = dev_priv->edp.preemphasis; + } + + switch (vswing) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; @@ -1107,7 +1115,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) signal_levels |= DP_VOLTAGE_1_2; break; } - switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + switch (preemphasis) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; @@ -1193,6 +1201,18 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) return true; } +static bool +intel_dp_aux_handshake_required(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (is_edp(intel_dp) && dev_priv->no_aux_handshake) + return false; + + return true; +} + static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, @@ -1205,6 +1225,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); + if (!intel_dp_aux_handshake_required(intel_dp)) + return true; + intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); @@ -1237,10 +1260,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); - /* Write the link configuration data */ - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, - intel_dp->link_configuration, - DP_LINK_CONFIGURATION_SIZE); + if (intel_dp_aux_handshake_required(intel_dp)) + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) @@ -1258,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1272,33 +1296,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) break; /* Set training pattern 1 */ - udelay(100); - if (!intel_dp_get_link_status(intel_dp)) + udelay(500); + if (intel_dp_aux_handshake_required(intel_dp)) { break; + } else { + if (!intel_dp_get_link_status(intel_dp)) + break; - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { - clock_recovery = true; - break; + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) + break; + + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } 
else + tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); } - - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) - break; - if (i == intel_dp->lane_count) - break; - - /* Check to see if we've tried the same voltage 5 times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) - break; - } else - tries = 0; - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); } intel_dp->DP = DP; @@ -1325,7 +1353,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1339,24 +1367,28 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_TRAINING_PATTERN_2)) break; - udelay(400); - if (!intel_dp_get_link_status(intel_dp)) - break; + udelay(500); - if (intel_channel_eq_ok(intel_dp)) { - channel_eq = true; + if (!intel_dp_aux_handshake_required(intel_dp)) { break; + } else { + if (!intel_dp_get_link_status(intel_dp)) + break; + + if (intel_channel_eq_ok(intel_dp)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) + break; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + ++tries; } - - /* Try 5 times */ - if (tries > 5) - break; - - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - ++tries; } - if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else From 895692befab73fd399d854c7db41d6d7260af2da Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:23 -0700 Subject: [PATCH 413/476] drm/i915/dp: don't bother with DP PLL for PCH attached eDP We don't use the CPU DP PLL with PCH attached eDP panels, so don't bother to enable it. 
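This patch and the neighbouring ones (407, 408, 414) all hinge on the CPU eDP versus PCH eDP split. A one-line sketch of the rule, using the existing intel_dp.c helpers the diff below relies on; CPU eDP means the panel is wired to the CPU DP-A port and needs the CPU eDP PLL, while PCH eDP sits behind the PCH like any other DP port and uses the 120MHz PCH refclk:

/* Sketch of the rule encoded here: only CPU-attached eDP uses the
 * CPU DP PLL in DP_A. */
static bool needs_cpu_edp_pll(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}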
Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d4ef20598da8..ada7319f0eaf 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -775,7 +775,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) intel_dp->DP |= DP_PIPEB_SELECT; - if (is_edp(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) From 298b0b392c750137f148fda056a7d4c42019814c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:24 -0700 Subject: [PATCH 414/476] drm/i915/dp: make eDP PLL functions work as advertised Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ada7319f0eaf..128c2fefd541 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -891,8 +891,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); - dpa_ctl &= ~DP_PLL_ENABLE; + dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); + udelay(200); } static void ironlake_edp_pll_off(struct drm_encoder *encoder) { @@ -902,7 +904,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) u32 dpa_ctl; dpa_ctl = I915_READ(DP_A); - dpa_ctl |= DP_PLL_ENABLE; + dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); udelay(200); From 382b09362711d7d03272230a33767015a277926e Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 7 Oct 2010 16:01:25 -0700 Subject: [PATCH 415/476] drm/i915: disable clock gating for the panel power sequencer Needed on Ibex Peak and Cougar Point or the panel won't always come on. Cc: stable@kernel.org Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_display.c | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a22887a5381..88292893b255 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2784,6 +2784,9 @@ #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) +#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) + /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 #define FDI_TXB_CTL 0x61100 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 89cfe4684147..8e98d708f970 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5745,6 +5745,13 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active.
+ */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + /* * According to the spec the following bits should be set in * order to enable memory self-refresh From 1510a97182b4ddb5fe3c4e8d05240f7cd6fd13e7 Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Fri, 8 Oct 2010 10:18:01 +0100 Subject: [PATCH 416/476] drm/i915/crt: Make sure the hotplug interrupt is enabled After disabling the hotplug interrupts for VGA detection on Ironlake, be sure to re-enable them again afterwards. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30378 Signed-off-by: Yuanhan Liu Cc: stable@kernel.org Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 389fcd2aea1f..c55c77043357 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { - I915_WRITE(PCH_ADPA, temp); + /* Make sure hotplug is enabled */ + I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); (void)I915_READ(PCH_ADPA); } From 2d7b8366ae4a9ec2183c30e432a4a9a495c82bcd Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Fri, 8 Oct 2010 10:21:06 +0100 Subject: [PATCH 417/476] drm/i915: Update hotplug interrupts register definitions for Sandybridge On Sandybridge, the bit definition for hotplug on SDE has changed, so update the code to new definition. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30378 Cc: stable@kernel.org Signed-off-by: Yuanhan Liu Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 21 ++++++++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 64c07c24e300..0d051e7f6702 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -298,6 +298,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir; + u32 hotplug_mask; struct drm_i915_master_private *master_priv; struct intel_ring_buffer *render_ring = &dev_priv->render_ring; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; @@ -317,6 +318,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) goto done; + if (HAS_PCH_CPT(dev)) + hotplug_mask = SDE_HOTPLUG_MASK_CPT; + else + hotplug_mask = SDE_HOTPLUG_MASK; + ret = IRQ_HANDLED; if (dev->primary->master) { @@ -358,10 +364,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) drm_handle_vblank(dev, 1); /* check event from PCH */ - if ((de_iir & DE_PCH_EVENT) && - (pch_iir & SDE_HOTPLUG_MASK)) { + if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - } if (de_iir & DE_PCU_EVENT) { I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -1431,8 +1435,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; - u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | - SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + u32 hotplug_mask; dev_priv->irq_mask_reg = 
~display_mask; dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; @@ -1459,6 +1462,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); (void) I915_READ(GTIER); + if (HAS_PCH_CPT(dev)) { + hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | + SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; + } else { + hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + } + dev_priv->pch_irq_mask_reg = ~hotplug_mask; dev_priv->pch_irq_enable_reg = hotplug_mask; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 88292893b255..47032186a31a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2601,6 +2601,10 @@ #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) +#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ + SDE_PORTD_HOTPLUG_CPT | \ + SDE_PORTC_HOTPLUG_CPT | \ + SDE_PORTB_HOTPLUG_CPT) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 From c9220b0f7cbd1d2272426aa81a72ae2f6582bb71 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 8 Oct 2010 08:57:10 +1000 Subject: [PATCH 418/476] drm/ttm: add unlocked variant of new manager put node. We need the unlocked variant for the new codepath introduced to fix the race condition in master recently. Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++++--- drivers/gpu/drm/ttm/ttm_bo_manager.c | 10 ++++++++++ include/drm/ttm/ttm_bo_driver.h | 4 ++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1e9bb2156dcf..5ef0103bd0b6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -464,9 +464,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) spin_lock(&glob->lru_lock); } - if (bo->mem.mm_node) { - ttm_bo_mem_put(bo, &bo->mem); - } + ttm_bo_mem_put_locked(bo, &bo->mem); atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); @@ -791,6 +789,15 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) } EXPORT_SYMBOL(ttm_bo_mem_put); +void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; + + if (mem->mm_node) + (*man->func->put_node_locked)(man, mem); +} +EXPORT_SYMBOL(ttm_bo_mem_put_locked); + /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 7410c190c891..35c97b20bdae 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -90,6 +90,15 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, } } +static void ttm_bo_man_put_node_locked(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + if (mem->mm_node) { + drm_mm_put_block(mem->mm_node); + mem->mm_node = NULL; + } +} + static int ttm_bo_man_init(struct ttm_mem_type_manager *man, unsigned long p_size) { @@ -143,6 +152,7 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = { ttm_bo_man_takedown, ttm_bo_man_get_node, ttm_bo_man_put_node, + ttm_bo_man_put_node_locked, ttm_bo_man_debug }; EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e3371dbe6a10..d0ff529fedde 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -214,6 +214,8 @@ struct ttm_mem_type_manager_func { struct ttm_mem_reg *mem); void (*put_node)(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem); + void (*put_node_locked)(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem); void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); }; @@ -667,6 +669,8 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); /** * ttm_bo_wait_for_cpu From 5480f727dc4c049eb46b191bfaeb034067aa6835 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 19 Oct 2010 10:36:47 +1000 Subject: [PATCH 419/476] Revert "drm/radeon/kms: remove some pll algo flags" This reverts commit f28488c282d8916b9b6190cc41714815bbaf97d5. On my rv610 test machine the monitor failed to light up after this. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 14 ++++++++++- drivers/gpu/drm/radeon/radeon_display.c | 26 +++++++++++++++++++-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 5 ++++ drivers/gpu/drm/radeon/radeon_mode.h | 14 +++++++---- 4 files changed, 52 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 037e3260cb7c..176f424975ac 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -501,9 +501,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, (rdev->family == CHIP_RS740)) pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ RADEON_PLL_PREFER_CLOSEST_LOWER); - } else + + if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + else + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + } else { pll->flags |= RADEON_PLL_LEGACY; + if (mode->clock > 200000) /* range limits??? 
*/ + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + else + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + + } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6c6846cdaa30..9151ded9c1cd 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -611,8 +611,7 @@ void radeon_compute_pll(struct radeon_pll *pll, if ((best_vco == 0 && error < best_error) || (best_vco != 0 && ((best_error > 100 && error < best_error - 100) || - (abs(error - best_error) < 100 && - vco_diff < best_vco_diff)))) { + (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; @@ -620,6 +619,29 @@ void radeon_compute_pll(struct radeon_pll *pll, best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; + } else if (current_freq == freq) { + if (best_freq == -1) { + best_post_div = post_div; + best_ref_div = ref_div; + best_feedback_div = feedback_div; + best_frac_feedback_div = frac_feedback_div; + best_freq = current_freq; + best_error = error; + best_vco_diff = vco_diff; + } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || + ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || + ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { + best_post_div = post_div; + best_ref_div = ref_div; + best_feedback_div = feedback_div; + best_frac_feedback_div = frac_feedback_div; + best_freq = current_freq; + best_error = error; + best_vco_diff = vco_diff; + } } if (current_freq < freq) min_frac_feed_div = frac_feedback_div + 1; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index c0bf8b7cc56c..f8dae717acc8 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -745,6 +745,11 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) pll->flags = RADEON_PLL_LEGACY; + if (mode->clock > 200000) /* range limits??? 
*/ + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + else + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3cda63e37b28..d58b003e9a04 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -139,10 +139,16 @@ struct radeon_tmds_pll { #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) #define RADEON_PLL_USE_REF_DIV (1 << 2) #define RADEON_PLL_LEGACY (1 << 3) -#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 4) -#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 5) -#define RADEON_PLL_USE_POST_DIV (1 << 6) -#define RADEON_PLL_IS_LCD (1 << 7) +#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) +#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5) +#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6) +#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7) +#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) +#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) +#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) +#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) +#define RADEON_PLL_USE_POST_DIV (1 << 12) +#define RADEON_PLL_IS_LCD (1 << 13) struct radeon_pll { /* reference frequency */ From 965d38074e6eae71757a8baf9a348139e1e6894d Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Sat, 9 Oct 2010 12:36:45 +0000 Subject: [PATCH 420/476] drm/ttm: Simplify ttm_bo_wait_unreserved Function ttm_bo_wait_unreserved can be slightly simplified. Signed-off-by: Jean Delvare Cc: Thomas Hellstrom Cc: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 5ef0103bd0b6..9f2eed520fc3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -166,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref) int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) { - if (interruptible) { - int ret = 0; - - ret = wait_event_interruptible(bo->event_queue, + return wait_event_interruptible(bo->event_queue, atomic_read(&bo->reserved) == 0); - if (unlikely(ret != 0)) - return ret; } else { wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); + return 0; } - return 0; } EXPORT_SYMBOL(ttm_bo_wait_unreserved); From f6086134d0b17b2c37f537a5429a919b3d2cced8 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sat, 16 Oct 2010 00:45:15 +0000 Subject: [PATCH 421/476] agp/amd-k7: Allow binding user memory to the AGP GART. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TTM-based DRM drivers need to be able to bind user memory to the AGP aperture. This patch fixes the "[TTM] AGP Bind memory failed." errors and the subsequent fallout seen with the nouveau driver. 
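For background, this is roughly how a TTM AGP backend hands user pages to the GART; a sketch under assumptions (error handling trimmed, helper name invented, the real nouveau/TTM path lives in ttm_agp_backend.c rather than in a helper like this):

/* Sketch: allocate an AGP_USER_MEMORY container, point it at the
 * buffer's pages and bind it.  agp_bind_memory() ends up in the
 * bridge driver's insert_memory() hook patched below, which used to
 * reject any type other than 0. */
static struct agp_memory *bind_user_pages(struct agp_bridge_data *bridge,
					  struct page **pages,
					  size_t num_pages, off_t pg_start)
{
	struct agp_memory *mem;
	size_t i;

	mem = agp_allocate_memory(bridge, num_pages, AGP_USER_MEMORY);
	if (!mem)
		return NULL;

	for (i = 0; i < num_pages; i++)
		mem->pages[i] = pages[i];
	mem->page_count = num_pages;

	if (agp_bind_memory(mem, pg_start)) {
		agp_free_memory(mem);
		return NULL;
	}
	return mem;
}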
Signed-off-by: Francisco Jerez Tested-by: Grzesiek Sójka Signed-off-by: Dave Airlie --- drivers/char/agp/amd-k7-agp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index b6b1568314c8..b1b4362bc648 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; - if (type != 0 || mem->type != 0) + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) return -EINVAL; if ((pg_start + mem->page_count) > num_entries) @@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) unsigned long __iomem *cur_gatt; unsigned long addr; - if (type != 0 || mem->type != 0) + if (type != mem->type || + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) return -EINVAL; for (i = pg_start; i < (mem->page_count + pg_start); i++) { From 38ed0fcacd6176cdadd070df42c29f94c546aec5 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Wed, 13 Oct 2010 14:09:42 -0500 Subject: [PATCH 422/476] Revert "radeon, kdb, kms: Save and restore the LUT on atomic KMS enter/exit" This reverts commit ff773714dd30b802c336064109c535d8b2774e2f. A generic solution is needed to save and retore the LUT information. CC: Jesse Barnes CC: dri-devel@lists.freedesktop.org Signed-off-by: Jason Wessel Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 32 --------------------- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 5 ---- drivers/gpu/drm/radeon/radeon_mode.h | 3 -- 3 files changed, 40 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 9151ded9c1cd..0383631da69c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -138,38 +138,6 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) legacy_crtc_load_lut(crtc); } -void radeon_crtc_save_lut(struct drm_crtc *crtc) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int i; - - if (!crtc->enabled) - return; - - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r_copy[i] = radeon_crtc->lut_r[i]; - radeon_crtc->lut_g_copy[i] = radeon_crtc->lut_g[i]; - radeon_crtc->lut_b_copy[i] = radeon_crtc->lut_b[i]; - } -} - -void radeon_crtc_restore_lut(struct drm_crtc *crtc) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int i; - - if (!crtc->enabled) - return; - - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r[i] = radeon_crtc->lut_r_copy[i]; - radeon_crtc->lut_g[i] = radeon_crtc->lut_g_copy[i]; - radeon_crtc->lut_b[i] = radeon_crtc->lut_b_copy[i]; - } - - radeon_crtc_load_lut(crtc); -} - /** Sets the color ramps on behalf of fbcon */ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index f8dae717acc8..326843ec51f6 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -355,11 +355,6 @@ int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int enter) { - if (enter) - radeon_crtc_save_lut(crtc); - else - radeon_crtc_restore_lut(crtc); - return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h 
b/drivers/gpu/drm/radeon/radeon_mode.h index d58b003e9a04..f99e12daa81d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -261,7 +261,6 @@ struct radeon_crtc { struct drm_crtc base; int crtc_id; u16 lut_r[256], lut_g[256], lut_b[256]; - u16 lut_r_copy[256], lut_g_copy[256], lut_b_copy[256]; bool enabled; bool can_tile; uint32_t crtc_offset; @@ -523,8 +522,6 @@ extern int atombios_get_encoder_mode(struct drm_encoder *encoder); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); -extern void radeon_crtc_save_lut(struct drm_crtc *crtc); -extern void radeon_crtc_restore_lut(struct drm_crtc *crtc); extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, From 99231028ff713820829b798d056c08a584281c25 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Wed, 13 Oct 2010 14:09:43 -0500 Subject: [PATCH 423/476] kdb, kms: Save and restore the LUT on atomic KMS enter/exit When changing VTs non-atomically the kernel works in conjunction with the Xserver in user space and receives the LUT information from the Xserver via a system call. When changing modes atomically for kdb, this information must be saved and restored without disturbing user space as if nothing ever happened. There is a short cut used by this patch where gamma_store is used as the save space. If this turns out to be a problem in the future a pre-allocated chunk of memory will be required for each crtc to save and restore the LUT information. Signed-off-by: Jason Wessel CC: Jesse Barnes CC: David Airlie CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 625a2d551d6a..8208e190faaa 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) return 0; } +static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) +{ + uint16_t *r_base, *g_base, *b_base; + int i; + + r_base = crtc->gamma_store; + g_base = r_base + crtc->gamma_size; + b_base = g_base + crtc->gamma_size; + + for (i = 0; i < crtc->gamma_size; i++) + helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); +} + +static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) +{ + uint16_t *r_base, *g_base, *b_base; + + r_base = crtc->gamma_store; + g_base = r_base + crtc->gamma_size; + b_base = g_base + crtc->gamma_size; + + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); +} + int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; @@ -260,6 +284,7 @@ int drm_fb_helper_debug_enter(struct fb_info *info) continue; funcs = mode_set->crtc->helper_private; + drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, @@ -309,6 +334,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info) continue; } + drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, crtc->y, 0); } From 21c74a8ea8b47eb6c3c621e36578f6e27f65c5c7 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Wed, 13 Oct 2010 14:09:44 -0500 Subject: 
[PATCH 424/476] drm, kdb, kms: Change mode_set_base_atomic() enter argument to be an enum The enter argument as implemented by commit 413d45d3627 (drm, kdb, kms: Add an enter argument to mode_set_base_atomic() API) should be more descriptive as to what it does vs just passing 1 and 0 around. There is no runtime behavior change as a result of this patch. Reported-by: Jesse Barnes Signed-off-by: Jason Wessel CC: David Airlie CC: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 5 ++--- drivers/gpu/drm/i915/intel_display.c | 5 +++-- drivers/gpu/drm/nouveau/nv04_crtc.c | 4 ++-- drivers/gpu/drm/nouveau/nv50_crtc.c | 2 +- drivers/gpu/drm/radeon/atombios_crtc.c | 2 +- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2 +- drivers/gpu/drm/radeon/radeon_mode.h | 7 +++++-- include/drm/drm_crtc_helper.h | 7 ++++++- 8 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8208e190faaa..d2849e4ea4d0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -289,8 +289,7 @@ int drm_fb_helper_debug_enter(struct fb_info *info) mode_set->fb, mode_set->x, mode_set->y, - 1); - + ENTER_ATOMIC_MODE_SET); } } @@ -336,7 +335,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info) drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, - crtc->y, 0); + crtc->y, LEAVE_ATOMIC_MODE_SET); } return 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9109c00f3ead..96d08a9f3aaa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1492,7 +1492,7 @@ err_unpin: /* Assume fb object is pinned & idle & fenced and just update base pointers */ static int intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter) + int x, int y, enum mode_set_atomic state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1614,7 +1614,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, atomic_read(&obj_priv->pending_flip) == 0); } - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 0); + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, + LEAVE_ATOMIC_MODE_SET); if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 17f7cf0c11a8..c71abc2a34d5 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -860,12 +860,12 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, static int nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter) + int x, int y, enum mode_set_atomic state) { struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; struct drm_device *dev = dev_priv->dev; - if (enter) + if (state == ENTER_ATOMIC_MODE_SET) nouveau_fbcon_save_disable_accel(dev); else nouveau_fbcon_restore_accel(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index ba91befd3734..16380d52cd88 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -708,7 +708,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, static int nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter) + 
int x, int y, enum mode_set_atomic state) { return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 176f424975ac..df2b6f2b35f8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1270,7 +1270,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter) + int x, int y, enum mode_set_atomic state) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 326843ec51f6..ace2e6384d40 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -353,7 +353,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter) + int x, int y, enum mode_set_atomic state) { return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f99e12daa81d..61b9243db217 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -526,7 +527,8 @@ extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter); + int x, int y, + enum mode_set_atomic state); extern int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -538,7 +540,8 @@ extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, int enter); + int x, int y, + enum mode_set_atomic state); extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 6a9f3935ea0b..73b071203dcc 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -39,6 +39,11 @@ #include +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + struct drm_crtc_helper_funcs { /* * Control power levels on the CRTC. If the mode passed in is @@ -62,7 +67,7 @@ struct drm_crtc_helper_funcs { struct drm_framebuffer *old_fb); int (*mode_set_base_atomic)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, - int is_enter); + enum mode_set_atomic); /* reload the current crtc LUT */ void (*load_lut)(struct drm_crtc *crtc); From 83e41eb9cc3de6c08d63bbcf2c953bfbf65c025c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 8 Oct 2010 16:28:27 +0100 Subject: [PATCH 425/476] Revert "drm/i915: Prevent module unload to avoid random memory corruption" This reverts commit 6939a5aca7cfada279a24c307e772f33104fca20. Daniel Vetter supplied a set of fixes for all the module unload bugs he could trigger on his machines, so let the fun recommence! 
--- drivers/gpu/drm/i915/i915_dma.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a99fae33bdf6..f451af69d437 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2065,9 +2065,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); - /* XXX Prevent module unload due to memory corruption bugs. */ - __module_get(THIS_MODULE); - return 0; out_workqueue_free: From 939fe4d7d6e2c92370ca5d1fb70e81043f5ff8d9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 9 Oct 2010 10:33:26 +0100 Subject: [PATCH 426/476] drm/i915: Remove duplicate set of ADPA definitions Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 47032186a31a..557f27134d05 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -661,13 +661,6 @@ #define LVDS 0x61180 #define LVDS_ON (1<<31) -#define ADPA 0x61100 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) - /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 @@ -1200,6 +1193,7 @@ #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) + /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) From 701394cc534a4a7883ddc4f8f82fb438b3d664ff Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 10 Oct 2010 18:54:08 +0100 Subject: [PATCH 427/476] drm/i915: Fix oops on HWS unload Freeing the Hardware Status Page was writing to the HWS register in order to disable the GPU writing to the HWS page. Unfortunately, we were writing to the mmio register after unmapping the register space, hence the oops. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f451af69d437..2caf43de8a6a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2133,9 +2133,6 @@ int i915_driver_unload(struct drm_device *dev) if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); - if (dev_priv->regs != NULL) - iounmap(dev_priv->regs); - intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -2157,6 +2154,9 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } + if (dev_priv->regs != NULL) + iounmap(dev_priv->regs); + intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); From 736085bcf91720fd90175c288c542c721c281bb0 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 8 Oct 2010 10:35:55 -0700 Subject: [PATCH 428/476] drm/i915/dp: down the DP link even if the reg indicates it's already down Since the PLL may still be on, and the training pattern may not be correct. Fixes suspend/resume on my PCH eDP test system. 
Signed-off-by: Jesse Barnes [ickle: minor merge conflict and silence the compiler] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 128c2fefd541..350c541e8e6c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -914,8 +914,6 @@ static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); @@ -925,8 +923,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) else ironlake_edp_pll_off(encoder); } - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -956,21 +953,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) if (mode != DRM_MODE_DPMS_ON) { if (is_edp(intel_dp)) ironlake_edp_backlight_off(dev); - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); if (is_edp(intel_dp)) ironlake_edp_panel_off(dev); if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { + if (is_edp(intel_dp)) + ironlake_edp_panel_on(intel_dp); if (!(dp_reg & DP_PORT_EN)) { - if (is_edp(intel_dp)) - ironlake_edp_panel_on(intel_dp); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); - if (is_edp(intel_dp)) - ironlake_edp_backlight_on(dev); } + if (is_edp(intel_dp)) + ironlake_edp_backlight_on(dev); } intel_dp->dpms_mode = mode; } From 8b99e68c0a40bcf082c1ba9aaad83cca4def8cec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Oct 2010 09:59:17 +0100 Subject: [PATCH 429/476] drm/i915: restore fixed FDI link rate on Sandybridge FDI_PLL_BIOS_0 register is for Ironlake only, don't apply to Sandybridge. Original-patch-by: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index faacbbdbb270..cda36b348fe8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -345,8 +345,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + if (IS_GEN5(dev)) { + struct drm_i915_private *dev_priv = dev->dev_private; + return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + } else + return 27; } static const intel_limit_t intel_limits_i8xx_dvo = { From 7b5337ddbaf7e4b71ef6fd6307c6f9ef84f636e9 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 13 Oct 2010 16:40:12 +0800 Subject: [PATCH 430/476] drm/i915: Fix GPIO pin to register mapping In i2c GPIO fallback, index 6 is reserved for nothing. 
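The reason the extra entry matters: the GMBUS/GPIO pin number is used directly as an index into the register table, and pin 6 is reserved on these parts, so without a placeholder slot pin 7 (GPIOF) resolves to the wrong register. A positional sketch of the fixed table (treat the exact pin-to-register pairing as illustrative; intel_i2c.c is authoritative):

/* Pin numbers index this array directly, so reserved pin 6 must still
 * occupy a slot even though it maps to no register. */
static const u32 example_map_pin_to_reg[] = {
	0,	/* pin 0: unused */
	GPIOB,	/* pin 1 */
	GPIOA,	/* pin 2 */
	GPIOC,	/* pin 3 */
	GPIOD,	/* pin 4 */
	GPIOE,	/* pin 5 */
	0,	/* pin 6: reserved, the entry this patch adds */
	GPIOF,	/* pin 7 */
};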
Signed-off-by: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_i2c.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2449a74d4d80..2be4f728ed0c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -155,6 +155,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) GPIOC, GPIOD, GPIOE, + 0, GPIOF, }; struct intel_gpio *gpio; From e60a0b107b3df072e23cb6d68510aa2615b059ce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Oct 2010 10:09:14 +0100 Subject: [PATCH 431/476] drm/i915: Sleep whilst waiting for the ring If userspace is submitting so many long running batches that the ring becomes full, throttle by sleeping for a 1ms before checking for free space. Simply yielding was causing excessive scheduler overhead whilst making no progress. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d89b88791aac..89004a622f49 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -707,7 +707,7 @@ int intel_wait_ring_buffer(struct drm_device *dev, master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } - yield(); + msleep(1); } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; From 6d139a87b747aaebc969ac5f4eb8db766fcd9cbd Mon Sep 17 00:00:00 2001 From: Bryan Freed Date: Thu, 14 Oct 2010 09:14:51 +0100 Subject: [PATCH 432/476] drm/i915: Initialize panel timing registers if VBIOS did not The time between start of the pixel clock and backlight enable is a basic panel timing constraint. If the Panel Power On/Off registers are found to be 0, assume we are booting without VBIOS initialization and set these registers to something reasonable. Change-Id: Ibed6cc10d46bf52fd92e0beb25ae3525b5eef99d Signed-off-by: Bryan Freed [ickle: rearranged into a separate function to distinguish its role from simply parsing the VBIOS tables.] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 5 ++++- drivers/gpu/drm/i915/intel_bios.c | 21 +++++++++++++++++++-- drivers/gpu/drm/i915/intel_bios.h | 3 ++- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2caf43de8a6a..35d121c70a1f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1235,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev, */ dev_priv->allow_batchbuffer = 1; - ret = intel_init_bios(dev); + ret = intel_parse_bios(dev); if (ret) DRM_INFO("failed to find VBIOS tables\n"); @@ -2001,6 +2001,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_setup_gmbus(dev); intel_opregion_setup(dev); + /* Make sure the bios did its job and set up vital registers */ + intel_setup_bios(dev); + i915_gem_load(dev); /* Init HWS */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index cc15447eff41..b9560f3cbb3d 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -577,7 +577,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) } /** - * intel_init_bios - initialize VBIOS settings & find VBT + * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. 
Sets scratch registers @@ -586,7 +586,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) * Returns 0 on success, nonzero on failure. */ bool -intel_init_bios(struct drm_device *dev) +intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; @@ -647,3 +647,20 @@ intel_init_bios(struct drm_device *dev) return 0; } + +/* Ensure that vital registers have been initialised, even if the BIOS + * is absent or just failing to do its job. + */ +void intel_setup_bios(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Set the Panel Power On/Off timings if uninitialized. */ + if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { + /* Set T2 to 40ms and T5 to 200ms */ + I915_WRITE(PP_ON_DELAYS, 0x019007d0); + + /* Set T3 to 35ms and Tx to 200ms */ + I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); + } +} diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index e1a598f2a966..5f8e4edcbbb9 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -467,7 +467,8 @@ struct bdb_edp { struct edp_link_params link_params[16]; } __attribute__ ((packed)); -bool intel_init_bios(struct drm_device *dev); +void intel_setup_bios(struct drm_device *dev); +bool intel_parse_bios(struct drm_device *dev); /* * Driver<->VBIOS interaction occurs through scratch bits in From 8fe9790d1652e7c306c862ea102a5e6126b412e1 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Sun, 19 Sep 2010 14:27:28 +0800 Subject: [PATCH 433/476] drm/edid: add helper function to detect monitor audio capability To help to determine if digital display port needs to enable audio output or not. This one adds a helper to get monitor's audio capability via EDID CEA extension block. Tested-by: Wu Fengguang Signed-off-by: Zhenyu Wang Reviewed-by: Adam Jackson Signed-off-by: Chris Wilson --- drivers/gpu/drm/drm_edid.c | 92 ++++++++++++++++++++++++++++++++------ include/drm/drm_crtc.h | 1 + 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fd033ebbdf84..c1a26217a530 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1267,7 +1267,35 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, } #define HDMI_IDENTIFIER 0x000C03 +#define AUDIO_BLOCK 0x01 #define VENDOR_BLOCK 0x03 +#define EDID_BASIC_AUDIO (1 << 6) + +/** + * Search EDID for CEA extension block. + */ +static u8 *drm_find_cea_extension(struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return NULL; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) + break; + } + + if (i == edid->extensions) + return NULL; + + return edid_ext; +} + /** * drm_detect_hdmi_monitor - detect whether monitor is hdmi. 
* @edid: monitor EDID information @@ -1277,24 +1305,13 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, */ bool drm_detect_hdmi_monitor(struct edid *edid) { - char *edid_ext = NULL; + u8 *edid_ext; int i, hdmi_id; int start_offset, end_offset; bool is_hdmi = false; - /* No EDID or EDID extensions */ - if (edid == NULL || edid->extensions == 0) - goto end; - - /* Find CEA extension */ - for (i = 0; i < edid->extensions; i++) { - edid_ext = (char *)edid + EDID_LENGTH * (i + 1); - /* This block is CEA extension */ - if (edid_ext[0] == 0x02) - break; - } - - if (i == edid->extensions) + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) goto end; /* Data block offset in CEA extension block */ @@ -1324,6 +1341,53 @@ end: } EXPORT_SYMBOL(drm_detect_hdmi_monitor); +/** + * drm_detect_monitor_audio - check monitor audio capability + * + * Monitor should have CEA extension block. + * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic + * audio' only. If there is any audio extension block and supported + * audio format, assume at least 'basic audio' support, even if 'basic + * audio' is not defined in EDID. + * + */ +bool drm_detect_monitor_audio(struct edid *edid) +{ + u8 *edid_ext; + int i, j; + bool has_audio = false; + int start_offset, end_offset; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + goto end; + + has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + + if (has_audio) { + DRM_DEBUG_KMS("Monitor has basic audio support\n"); + goto end; + } + + /* Data block offset in CEA extension block */ + start_offset = 4; + end_offset = edid_ext[2]; + + for (i = start_offset; i < end_offset; + i += ((edid_ext[i] & 0x1f) + 1)) { + if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { + has_audio = true; + for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) + DRM_DEBUG_KMS("CEA audio format %d\n", + (edid_ext[i + j] >> 3) & 0xf); + goto end; + } + } +end: + return has_audio; +} +EXPORT_SYMBOL(drm_detect_monitor_audio); + /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 15c4796fd467..029aa688e787 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -763,6 +763,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern bool drm_detect_hdmi_monitor(struct edid *edid); +extern bool drm_detect_monitor_audio(struct edid *edid); extern int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, From a9756bb5b25d5d997df0c5d8c95db01292191bea Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Sun, 19 Sep 2010 13:09:06 +0800 Subject: [PATCH 434/476] drm/i915: Enable DisplayPort audio This will turn on DP audio output by checking monitor's audio capability. 
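In sketch form, the flow shared by this patch and the HDMI one that follows: decide at detect() time whether the sink can take audio, then let mode_set() set the port's audio-enable bit (SDVO_AUDIO_ENABLE for HDMI/SDVO; the DP port register has its own enable bit). The helper below is illustrative, not the driver's code:

static bool sink_has_audio(struct drm_connector *connector,
			   struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	bool has_audio = false;

	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}
	return has_audio;	/* remembered until mode_set() runs */
}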
Signed-off-by: Zhenyu Wang [ickle: rebase onto recent changes and rearranged for clarity] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 62 ++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 350c541e8e6c..42cd528286a5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1458,9 +1458,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) } static enum drm_connector_status -ironlake_dp_detect(struct drm_connector *connector) +ironlake_dp_detect(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = intel_attached_dp(connector); enum drm_connector_status status; /* Can't disconnect eDP */ @@ -1470,8 +1469,8 @@ ironlake_dp_detect(struct drm_connector *connector) status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) - { + sizeof (intel_dp->dpcd)) + == sizeof(intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } @@ -1480,25 +1479,13 @@ ironlake_dp_detect(struct drm_connector *connector) return status; } -/** - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. - * - * \return true if DP port is connected. - * \return false if DP port is disconnected. - */ static enum drm_connector_status -intel_dp_detect(struct drm_connector *connector, bool force) +g4x_dp_detect(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp, bit; enum drm_connector_status status; - - intel_dp->has_audio = false; - - if (HAS_PCH_SPLIT(dev)) - return ironlake_dp_detect(connector); + uint32_t temp, bit; switch (intel_dp->output_reg) { case DP_B: @@ -1520,14 +1507,47 @@ intel_dp_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_dp, - 0x000, intel_dp->dpcd, + if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } - return status; + + return bit; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected. 
+ */ +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = intel_dp->base.base.dev; + enum drm_connector_status status; + struct edid *edid = NULL; + + intel_dp->has_audio = false; + + if (HAS_PCH_SPLIT(dev)) + status = ironlake_dp_detect(intel_dp); + else + status = g4x_dp_detect(intel_dp); + if (status != connector_status_connected) + return status; + + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + } + + return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) From 2e3d6006aca163db3eeb931cec631974aaa3c293 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 10 Sep 2010 10:39:40 +0800 Subject: [PATCH 435/476] drm/i915: Enable HDMI audio for monitor with audio support Rely on monitor's audio capability to turn on audio output for HDMI. Tested-by: Wu Fengguang Signed-off-by: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_hdmi.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9fb9501f2d07..2d918dc046ef 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -42,6 +42,7 @@ struct intel_hdmi { u32 sdvox_reg; int ddc_bus; bool has_hdmi_sink; + bool has_audio; }; static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) @@ -72,11 +73,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; - if (intel_hdmi->has_hdmi_sink) { + /* Required on CPT */ + if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) + sdvox |= HDMI_MODE_SELECT; + + if (intel_hdmi->has_audio) sdvox |= SDVO_AUDIO_ENABLE; - if (HAS_PCH_CPT(dev)) - sdvox |= HDMI_MODE_SELECT; - } if (intel_crtc->pipe == 1) { if (HAS_PCH_CPT(dev)) @@ -154,6 +156,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) enum drm_connector_status status = connector_status_disconnected; intel_hdmi->has_hdmi_sink = false; + intel_hdmi->has_audio = false; edid = drm_get_edid(connector, &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); @@ -161,6 +164,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } connector->display_info.raw_edid = NULL; kfree(edid); From f684960ed5b902994ba6540138d910f5caf7ea2a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 09:29:33 +0100 Subject: [PATCH 436/476] drm/i915/dp: Add 'force_audio' property Allow the user to override the detection of the sink's audio capabilities from EDID. Not all sinks support the required EDID level to specify whether they handle audio over the display connection, so allow the user to enable it manually. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 74 ++++++++++++++++++++++++++++++--- 1 file changed, 69 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 42cd528286a5..891f4f1d63b1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -48,6 +48,7 @@ struct intel_dp { uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; + int force_audio; int dpms_mode; uint8_t link_bw; uint8_t lane_count; @@ -57,6 +58,8 @@ struct intel_dp { bool is_pch_edp; uint8_t train_set[4]; uint8_t link_status[DP_LINK_STATUS_SIZE]; + + struct drm_property *force_audio_property; }; /** @@ -1540,11 +1543,15 @@ intel_dp_detect(struct drm_connector *connector, bool force) if (status != connector_status_connected) return status; - edid = drm_get_edid(connector, &intel_dp->adapter); - if (edid) { - intel_dp->has_audio = drm_detect_monitor_audio(edid); - connector->display_info.raw_edid = NULL; - kfree(edid); + if (intel_dp->force_audio) { + intel_dp->has_audio = intel_dp->force_audio > 0; + } else { + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + } } return connector_status_connected; @@ -1589,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector) return 0; } +static int +intel_dp_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_dp->force_audio_property) { + if (val == intel_dp->force_audio) + return 0; + + intel_dp->force_audio = val; + + if (val > 0 && intel_dp->has_audio) + return 0; + if (val < 0 && !intel_dp->has_audio) + return 0; + + intel_dp->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_dp->base.base.crtc) { + struct drm_crtc *crtc = intel_dp->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + static void intel_dp_destroy (struct drm_connector *connector) { @@ -1618,6 +1665,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_set_property, .destroy = intel_dp_destroy, }; @@ -1682,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev) return false; } +static void +intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_dp->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_dp->force_audio_property) { + intel_dp->force_audio_property->values[0] = -1; + intel_dp->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); + } +} + void intel_dp_init(struct drm_device *dev, int output_reg) { @@ -1808,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) } } + intel_dp_add_properties(intel_dp, connector); + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. 
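The force_audio range property introduced above for DP (and for SDVO and HDMI in the following two patches) spans -1..1: -1 forces audio off, 0 leaves the decision to EDID detection, and 1 forces audio on regardless of what the sink reports. As a rough illustration of how userspace could drive it, the sketch below looks the property up by name on a connector and sets it to 1 via libdrm; the property name is the only part taken from this series, while the DRM fd handling and connector id are assumed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: force audio on for one connector. */
static int force_audio_on(int fd, uint32_t connector_id)
{
	drmModeConnector *conn = drmModeGetConnector(fd, connector_id);
	int i, ret = -1;

	if (!conn)
		return -1;

	for (i = 0; i < conn->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);

		if (!prop)
			continue;
		if (strcmp(prop->name, "force_audio") == 0)
			/* -1 forces audio off, 0 honours EDID, 1 forces it on */
			ret = drmModeConnectorSetProperty(fd, connector_id,
							  prop->prop_id, 1);
		drmModeFreeProperty(prop);
	}

	drmModeFreeConnector(conn);
	return ret;
}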
From 7f36e7edd6c1851ea1f061ddbefb6f820a0575a2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 09:29:33 +0100 Subject: [PATCH 437/476] drm/i915/sdvo: Add 'force_audio' property Allow the user to override the detection of the sink's audio capabilities from EDID. Not all sinks support the required EDID level to specify whether they handle audio over the display connection, so allow the user to enable it manually. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 48 +++++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a84224f37605..c245383cf7ed 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -107,6 +107,7 @@ struct intel_sdvo { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; + bool has_audio; /** * This is set if we detect output of sdvo device as LVDS and @@ -138,11 +139,15 @@ struct intel_sdvo_connector { /* Mark the type of connector */ uint16_t output_flag; + int force_audio; + /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; + struct drm_property *force_audio_property; + /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; @@ -1150,7 +1155,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; - if (intel_sdvo->is_hdmi) + if (intel_sdvo->has_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { @@ -1476,11 +1481,18 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); + intel_sdvo->has_audio = drm_detect_monitor_audio(edid); } connector->display_info.raw_edid = NULL; kfree(edid); } - + + if (status == connector_status_connected) { + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + if (intel_sdvo_connector->force_audio) + intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; + } + return status; } @@ -1787,6 +1799,21 @@ intel_sdvo_set_property(struct drm_connector *connector, if (ret) return ret; + if (property == intel_sdvo_connector->force_audio_property) { + if (val == intel_sdvo_connector->force_audio) + return 0; + + intel_sdvo_connector->force_audio = val; + + if (val > 0 && intel_sdvo->has_audio) + return 0; + if (val < 0 && !intel_sdvo->has_audio) + return 0; + + intel_sdvo->has_audio = val > 0; + goto done; + } + #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ @@ -2078,6 +2105,21 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, drm_sysfs_connector_add(&connector->base.base); } +static void +intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) +{ + struct drm_device *dev = connector->base.base.dev; + + connector->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (connector->force_audio_property) { + connector->force_audio_property->values[0] = -1; + connector->force_audio_property->values[1] = 1; + drm_connector_attach_property(&connector->base.base, + connector->force_audio_property, 0); + } +} + static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int 
device) { @@ -2118,6 +2160,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + intel_sdvo_add_hdmi_properties(intel_sdvo_connector); + return true; } From 55b7d6e8c4690047ac001026cb75a47f747db816 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Sep 2010 09:29:33 +0100 Subject: [PATCH 438/476] drm/i915/hdmi: Add 'force_audio' property Allow the user to override the detection of the sink's audio capabilities from EDID. Not all sinks support the required EDID level to specify whether they handle audio over the display connection, so allow the user to enable it manually. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_hdmi.c | 64 +++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2d918dc046ef..6c3b2ecd59d5 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -43,6 +43,8 @@ struct intel_hdmi { int ddc_bus; bool has_hdmi_sink; bool has_audio; + int force_audio; + struct drm_property *force_audio_property; }; static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) @@ -170,6 +172,11 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) kfree(edid); } + if (status == connector_status_connected) { + if (intel_hdmi->force_audio) + intel_hdmi->has_audio = intel_hdmi->force_audio > 0; + } + return status; } @@ -186,6 +193,46 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); } +static int +intel_hdmi_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_hdmi->force_audio_property) { + if (val == intel_hdmi->force_audio) + return 0; + + intel_hdmi->force_audio = val; + + if (val > 0 && intel_hdmi->has_audio) + return 0; + if (val < 0 && !intel_hdmi->has_audio) + return 0; + + intel_hdmi->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_hdmi->base.base.crtc) { + struct drm_crtc *crtc = intel_hdmi->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + static void intel_hdmi_destroy(struct drm_connector *connector) { drm_sysfs_connector_remove(connector); @@ -205,6 +252,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_hdmi_set_property, .destroy = intel_hdmi_destroy, }; @@ -218,6 +266,20 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { .destroy = intel_encoder_destroy, }; +static void +intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_hdmi->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_hdmi->force_audio_property) { + intel_hdmi->force_audio_property->values[0] = -1; + intel_hdmi->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); + } +} + void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; 
@@ -279,6 +341,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); + intel_hdmi_add_properties(intel_hdmi, connector); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); From 2549d6c26ce1c85a76990b972a2c7e8f440455cd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 12:10:41 +0100 Subject: [PATCH 439/476] drm/i915: Avoid vmallocing a buffer for the relocations ... perform an access validation check up front instead and copy them in on-demand, during i915_gem_object_pin_and_relocate(). As around 20% of the CPU overhead may be spent inside vmalloc for the relocation entries when submitting an execbuffer [for x11perf -aa10text], the savings are considerable and result in around a 10% throughput increase [for glyphs]. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 230 +++++++++++--------------------- 1 file changed, 78 insertions(+), 152 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 72ab3032300a..67998e8a2d70 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3291,12 +3291,12 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry, - struct drm_i915_gem_relocation_entry *relocs) + struct drm_i915_gem_exec_object2 *entry) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_relocation_entry __user *user_relocs; int i, ret; void __iomem *reloc_page; bool need_fence; @@ -3337,15 +3337,24 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, /* Apply the relocations, using the GTT aperture to avoid cache * flushing requirements. 
*/ + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; + struct drm_i915_gem_relocation_entry reloc; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_obj_priv; uint32_t reloc_val, reloc_offset; uint32_t __iomem *reloc_entry; + ret = __copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc)); + if (ret) { + i915_gem_object_unpin(obj); + return -EFAULT; + } + target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc->target_handle); + reloc.target_handle); if (target_obj == NULL) { i915_gem_object_unpin(obj); return -ENOENT; @@ -3358,13 +3367,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, + (int) reloc.offset, + (int) reloc.target_handle, + (int) reloc.read_domains, + (int) reloc.write_domain, (int) target_obj_priv->gtt_offset, - (int) reloc->presumed_offset, - reloc->delta); + (int) reloc.presumed_offset, + reloc.delta); #endif /* The target buffer should have appeared before us in the @@ -3372,89 +3381,89 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, */ if (target_obj_priv->gtt_space == NULL) { DRM_ERROR("No GTT space found for object %d\n", - reloc->target_handle); + reloc.target_handle); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } /* Validate that the target is in a valid r/w GPU domain */ - if (reloc->write_domain & (reloc->write_domain - 1)) { + if (reloc.write_domain & (reloc.write_domain - 1)) { DRM_ERROR("reloc with multiple write domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { + if (reloc.write_domain & I915_GEM_DOMAIN_CPU || + reloc.read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { + if (reloc.write_domain && target_obj->pending_write_domain && + reloc.write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->write_domain, + obj, reloc.target_handle, + (int) reloc.offset, + reloc.write_domain, target_obj->pending_write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - target_obj->pending_read_domains |= reloc->read_domains; - target_obj->pending_write_domain |= reloc->write_domain; + target_obj->pending_read_domains |= reloc.read_domains; + target_obj->pending_write_domain |= reloc.write_domain; /* If the relocation already has the right 
value in it, no * more work needs to be done. */ - if (target_obj_priv->gtt_offset == reloc->presumed_offset) { + if (target_obj_priv->gtt_offset == reloc.presumed_offset) { drm_gem_object_unreference(target_obj); continue; } /* Check that the relocation address is valid... */ - if (reloc->offset > obj->size - 4) { + if (reloc.offset > obj->size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->offset, (int) obj->size); + obj, reloc.target_handle, + (int) reloc.offset, (int) obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc->offset & 3) { + if (reloc.offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc->target_handle, - (int) reloc->offset); + obj, reloc.target_handle, + (int) reloc.offset); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } /* and points to somewhere within the target object. */ - if (reloc->delta >= target_obj->size) { + if (reloc.delta >= target_obj->size) { DRM_ERROR("Relocation beyond target object bounds: " "obj %p target %d delta %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->delta, (int) target_obj->size); + obj, reloc.target_handle, + (int) reloc.delta, (int) target_obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; @@ -3470,23 +3479,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, /* Map the page containing the relocation we're going to * perform. */ - reloc_offset = obj_priv->gtt_offset + reloc->offset; + reloc_offset = obj_priv->gtt_offset + reloc.offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, (reloc_offset & ~(PAGE_SIZE - 1)), KM_USER0); reloc_entry = (uint32_t __iomem *)(reloc_page + (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc->delta; + reloc_val = target_obj_priv->gtt_offset + reloc.delta; writel(reloc_val, reloc_entry); io_mapping_unmap_atomic(reloc_page, KM_USER0); - /* The updated presumed offset for this entry will be - * copied back out to the user. 
- */ - reloc->presumed_offset = target_obj_priv->gtt_offset; - drm_gem_object_unreference(target_obj); } @@ -3551,86 +3555,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) } static int -i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry **relocs) -{ - uint32_t reloc_count = 0, reloc_index = 0, i; - int ret; - - *relocs = NULL; - for (i = 0; i < buffer_count; i++) { - if (reloc_count + exec_list[i].relocation_count < reloc_count) - return -EINVAL; - reloc_count += exec_list[i].relocation_count; - } - - *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); - if (*relocs == NULL) { - DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); - return -ENOMEM; - } - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - ret = copy_from_user(&(*relocs)[reloc_index], - user_relocs, - exec_list[i].relocation_count * - sizeof(**relocs)); - if (ret != 0) { - drm_free_large(*relocs); - *relocs = NULL; - return -EFAULT; - } - - reloc_index += exec_list[i].relocation_count; - } - - return 0; -} - -static int -i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry *relocs) -{ - uint32_t reloc_count = 0, i; - int ret = 0; - - if (relocs == NULL) - return 0; - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - int unwritten; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - unwritten = copy_to_user(user_relocs, - &relocs[reloc_count], - exec_list[i].relocation_count * - sizeof(*relocs)); - - if (unwritten) { - ret = -EFAULT; - goto err; - } - - reloc_count += exec_list[i].relocation_count; - } - -err: - drm_free_large(relocs); - - return ret; -} - -static int -i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, + uint64_t exec_offset) { uint32_t exec_start, exec_len; @@ -3646,6 +3572,26 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, return 0; } +static int +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) +{ + int i; + + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; + size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); + + if (!access_ok(VERIFY_READ, ptr, length)) + return -EFAULT; + + if (fault_in_pages_readable(ptr, length)) + return -EFAULT; + } + + return 0; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv, @@ -3657,11 +3603,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; struct drm_i915_gem_request *request = NULL; - int ret, ret2, i, pinned = 0; + int ret, i, pinned = 0; uint64_t exec_offset; - uint32_t reloc_index; int pin_tries, flips; struct intel_ring_buffer *ring = NULL; @@ -3670,6 +3614,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; + ret = validate_exec_list(exec_list, args->buffer_count); + if (ret) + return ret; + #if WATCH_EXEC DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) 
args->buffers_ptr, args->buffer_count, args->batch_len); @@ -3722,11 +3670,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } - ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, - &relocs); - if (ret != 0) - goto pre_mutex_err; - ret = i915_mutex_lock_interruptible(dev); if (ret) goto pre_mutex_err; @@ -3765,19 +3708,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Pin and relocate */ for (pin_tries = 0; ; pin_tries++) { ret = 0; - reloc_index = 0; for (i = 0; i < args->buffer_count; i++) { object_list[i]->pending_read_domains = 0; object_list[i]->pending_write_domain = 0; ret = i915_gem_object_pin_and_relocate(object_list[i], file_priv, - &exec_list[i], - &relocs[reloc_index]); + &exec_list[i]); if (ret) break; pinned = i + 1; - reloc_index += exec_list[i].relocation_count; } /* success */ if (ret == 0) @@ -3967,20 +3907,6 @@ err: mutex_unlock(&dev->struct_mutex); pre_mutex_err: - /* Copy the updated relocations out regardless of current error - * state. Failure to update the relocs would mean that the next - * time userland calls execbuf, it would do so with presumed offset - * state that didn't match the actual object state. - */ - ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, - relocs); - if (ret2 != 0) { - DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); - - if (ret == 0) - ret = ret2; - } - drm_free_large(object_list); kfree(cliprects); kfree(request); From f0c43d9b7ec1bb9827b3dd5ac5915d22ceed8f6a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 12:44:48 +0100 Subject: [PATCH 440/476] drm/i915: Perform relocations in CPU domain [if in CPU domain] Avoid an early eviction of the batch buffer into the uncached GTT domain, and so do the relocation fixup in cacheable memory. 
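In other words, while the object's write domain is still the CPU, the relocation value can be stored through a regular cacheable kernel mapping of the backing page instead of forcing the object into the GTT domain and using an uncached iowrite32 through the aperture. A condensed sketch of that fixup path, with an assumed helper name and simplified arguments rather than the literal hunk below:

#include <linux/highmem.h>	/* kmap_atomic, kunmap_atomic */
#include <linux/types.h>

/* Hypothetical helper: patch a 32-bit relocation through the cached
 * CPU mapping of the page that contains it.
 */
static void write_reloc_cached(struct page *page, u32 offset_in_page, u32 value)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	*(u32 *)(vaddr + offset_in_page) = value;
	kunmap_atomic(vaddr, KM_USER0);
}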
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 55 +++++++++++++++++---------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 67998e8a2d70..32ff571672b4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3124,9 +3124,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) uint32_t flush_domains = 0; uint32_t old_read_domains; - BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); - BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); - intel_mark_busy(dev, obj); /* @@ -3298,7 +3295,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_gem_relocation_entry __user *user_relocs; int i, ret; - void __iomem *reloc_page; bool need_fence; need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && @@ -3342,8 +3338,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_i915_gem_relocation_entry reloc; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_obj_priv; - uint32_t reloc_val, reloc_offset; - uint32_t __iomem *reloc_entry; ret = __copy_from_user_inatomic(&reloc, user_relocs+i, @@ -3469,28 +3463,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, return -EINVAL; } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; + reloc.delta += target_obj_priv->gtt_offset; + if (obj->write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc.offset & ~PAGE_MASK; + char *vaddr; + + vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0); + *(uint32_t *)(vaddr + page_offset) = reloc.delta; + kunmap_atomic(vaddr, KM_USER0); + } else { + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + int ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) { + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return ret; + } + + /* Map the page containing the relocation we're going to perform. */ + reloc.offset += obj_priv->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc.offset & PAGE_MASK, + KM_USER0); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc.offset & ~PAGE_MASK)); + iowrite32(reloc.delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page, KM_USER0); } - /* Map the page containing the relocation we're going to - * perform. - */ - reloc_offset = obj_priv->gtt_offset + reloc.offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - (reloc_offset & - ~(PAGE_SIZE - 1)), - KM_USER0); - reloc_entry = (uint32_t __iomem *)(reloc_page + - (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc.delta; - - writel(reloc_val, reloc_entry); - io_mapping_unmap_atomic(reloc_page, KM_USER0); - drm_gem_object_unreference(target_obj); } From 202f2fef7a1aa6b2e4fa6e1de3ef582342fd41f0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 13:20:40 +0100 Subject: [PATCH 441/476] drm/i915: Avoid taking the mutex for dropping the refcnt upon creation After allocation a handle for the fresh object, we know that we can safely drop the refcnt without triggering a free so we do not need the mutex. Strangely, this mutex acquisition is the one that appears on driver profiles. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 32ff571672b4..942e4b351cdd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -244,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); if (ret) { + drm_gem_object_release(obj); + i915_gem_info_remove_obj(dev->dev_private, obj->size); + kfree(obj); return ret; } + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference(obj); + trace_i915_gem_object_create(obj); + args->handle = handle; return 0; } @@ -4380,8 +4385,6 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; - trace_i915_gem_object_create(&obj->base); - return &obj->base; } From b5e4feb6615fe07150f05bb0e0ccc0ff9138b9ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 13:47:43 +0100 Subject: [PATCH 442/476] drm/i915: Attempt to prefault user pages for pread/pwrite ... in the hope that it makes the atomic fast paths more likely. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 942e4b351cdd..b44c09ab8928 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -265,19 +265,14 @@ fast_shmem_read(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; int unwritten; + char *vaddr; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - if (vaddr == NULL) - return -ENOMEM; unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr, KM_USER0); - if (unwritten) - return -EFAULT; - - return 0; + return unwritten ? -EFAULT : 0; } static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) @@ -602,6 +597,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } + ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; + } + if (i915_gem_object_needs_bit17_swizzle(obj)) { ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); } else { @@ -668,18 +670,14 @@ fast_shmem_write(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; - unsigned long unwritten; + int unwritten; + char *vaddr; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - if (vaddr == NULL) - return -ENOMEM; unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); kunmap_atomic(vaddr, KM_USER0); - if (unwritten) - return -EFAULT; - return 0; + return unwritten ? -EFAULT : 0; } /** @@ -1078,6 +1076,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } + ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; + } + /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get * different detiling behavior between reading and writing. 
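The prefault added in the patch above pairs with the existing fast/slow split in pread and pwrite: the ioctl touches the user buffer while it may still sleep, then the fast path copies each page with the non-sleeping primitives and only falls back to the slow path on a fault. A condensed sketch of that shape, using an assumed helper name rather than the literal driver code:

#include <linux/errno.h>
#include <linux/pagemap.h>	/* fault_in_pages_readable */
#include <linux/highmem.h>	/* kmap_atomic, kunmap_atomic */
#include <linux/uaccess.h>	/* __copy_from_user_inatomic */

/* Hypothetical fast-path helper. The caller is assumed to prefault first,
 * while sleeping is still allowed:
 *	if (fault_in_pages_readable(user_ptr, size))
 *		return -EFAULT;
 * A fault during the atomic copy returns -EFAULT so the caller can retry
 * via the sleeping slow path.
 */
static int pwrite_page_fast(struct page *page, unsigned int offset,
			    const char __user *data, unsigned int len)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(page, KM_USER0);
	ret = __copy_from_user_inatomic(vaddr + offset, data, len);
	kunmap_atomic(vaddr, KM_USER0);

	return ret ? -EFAULT : 0;
}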
From fbd5a26d500c7cd8943cc5f37ccc7e49cf386053 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 15:03:58 +0100 Subject: [PATCH 443/476] drm/i915: Rearrange acquisition of mutex during pwrite ... to avoid reacquiring it to drop the object reference count on exit. Note we have to make sure we now drop (and reacquire) the lock around acquiring the mm semaphore on the slow paths. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 172 ++++++++++++-------------------- 1 file changed, 66 insertions(+), 106 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b44c09ab8928..1177ff577914 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -635,9 +635,7 @@ fast_user_write(struct io_mapping *mapping, unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); - if (unwritten) - return -EFAULT; - return 0; + return unwritten; } /* Here's the write path which can sleep for @@ -670,14 +668,14 @@ fast_shmem_write(struct page **pages, char __user *data, int length) { - int unwritten; char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); + ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); kunmap_atomic(vaddr, KM_USER0); - return unwritten ? -EFAULT : 0; + return ret; } /** @@ -695,24 +693,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_pin(obj, 0); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; - } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) - goto fail; - obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -729,26 +713,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, - page_offset, user_data, page_length); - /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. 
*/ - if (ret) - goto fail; + if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, + page_offset, user_data, page_length)) + + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail: - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -785,30 +764,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; goto out_unpin_pages; } - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto out_unpin_pages; - - ret = i915_gem_object_pin(obj, 0); - if (ret) - goto out_unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) - goto out_unpin_object; + goto out_unpin_pages; obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -844,10 +817,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, data_ptr += page_length; } -out_unpin_object: - i915_gem_object_unpin(obj); -out_unlock: - mutex_unlock(&dev->struct_mutex); out_unpin_pages: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); @@ -870,23 +839,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; obj_priv->dirty = 1; @@ -904,23 +860,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_write(obj_priv->pages, + if (fast_shmem_write(obj_priv->pages, page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -962,28 +912,22 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto fail_put_user_pages; - - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto fail_unlock; - ret = 
i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; + if (ret) + goto out; + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -1029,11 +973,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); drm_free_large(user_pages); @@ -1048,18 +988,24 @@ fail_put_user_pages: */ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret = 0; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) return -ENOENT; obj_priv = to_intel_bo(obj); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + /* Bounds check destination. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; @@ -1090,32 +1036,46 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. */ if (obj_priv->phys_obj) - ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); + ret = i915_gem_phys_pwrite(dev, obj, args, file); else if (obj_priv->tiling_mode == I915_TILING_NONE && obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_gtt_pwrite_slow(dev, obj, args, - file_priv); - } - } else if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin; + + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); + +out_unpin: + i915_gem_object_unpin(obj); } else { - ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, - file_priv); - } + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); + +out_put: + i915_gem_object_put_pages(obj); } -#if WATCH_PWRITE - if (ret) - DRM_INFO("pwrite failed %d\n", ret); -#endif - out: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return ret; } From 4f27b75d56334f33cbccff5da8372dc4aba122ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 15:26:45 +0100 Subject: [PATCH 444/476] drm/i915: rearrange mutex acquisition for pread ... to avoid the double acquisition along fast[er] paths. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 103 ++++++++++++++------------------ 1 file changed, 45 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1177ff577914..efc6a4e3b1d2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -265,14 +265,14 @@ fast_shmem_read(struct page **pages, char __user *data, int length) { - int unwritten; char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); + ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr, KM_USER0); - return unwritten ? -EFAULT : 0; + return ret; } static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) @@ -366,24 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; @@ -400,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_read(obj_priv->pages, - page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + if (fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } static int @@ -477,33 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 1, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto fail_put_user_pages; - - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto fail_unlock; - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, args->size); - if (ret != 0) - goto fail_put_pages; + if (ret) + goto out; + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -548,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - 
mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) { SetPageDirty(user_pages[i]); page_cache_release(user_pages[i]); @@ -581,6 +552,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -ENOENT; obj_priv = to_intel_bo(obj); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + /* Bounds check source. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; @@ -604,17 +581,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); - } else { - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); - if (ret != 0) - ret = i915_gem_shmem_pread_slow(dev, obj, args, - file_priv); - } + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, + args->size); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + +out_put: + i915_gem_object_put_pages(obj); out: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return ret; } @@ -908,7 +895,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; From 3dde04b0152634d42994b34b86bbf3c70fbc6b19 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Oct 2010 16:30:41 +0100 Subject: [PATCH 445/476] agp/intel: Also add B43.1 to list of supported devices This was a missing piece from 41a5142 that dropped recognition of the AGP module for the second B43 variant. Reported-by: Stefan Bader Signed-off-by: Chris Wilson Cc: stable@kernel.org --- drivers/char/agp/intel-agp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 5cd2221ab472..e72f49d52202 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -895,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), ID(PCI_DEVICE_ID_INTEL_B43_HB), + ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), From 139d363bcf2d995a72694ddd2b8665af6cb7fb54 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 15 Oct 2010 17:14:33 +0200 Subject: [PATCH 446/476] drivers: gpu: drm: i915: Fix a typo. 
"userpace" -> "userspace" Signed-off-by: Andrea Gelmini Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0d051e7f6702..1e30c250140b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -664,7 +664,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userpace. + * method to avoid being overwritten by userspace. */ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); if (batchbuffer[1] != batchbuffer[0]) From 1d7cfea152cae6159aa30ceae38c3eaf13ea083c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 17 Oct 2010 09:45:41 +0100 Subject: [PATCH 447/476] drm/i915: Do interrupible mutex lock first to avoid locking for unreference One of the primarily consumers of the i915 driver is X, a large signal driven application. Frequently when writing into the buffers, there is a pending signal which causes us not to take the interruptible lock but then we need to take that same lock around the object unreference. By rearranging the code to do the interruptible lock as the first check, we can avoid the frequent additional locking around the unreference. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 206 ++++++++++++++------------------ 1 file changed, 93 insertions(+), 113 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index efc6a4e3b1d2..34a07fc20513 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -547,16 +547,16 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret = 0; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } + obj_priv = to_intel_bo(obj); /* Bounds check source. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { @@ -601,6 +601,7 @@ out_put: i915_gem_object_put_pages(obj); out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -982,16 +983,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret = 0; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file, args->handle); - if (obj == NULL) - return -ENOENT; + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } /* Bounds check destination. 
*/ if (args->offset > obj->size || args->size > obj->size - args->offset) { @@ -1062,6 +1064,7 @@ out_put: out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1098,16 +1101,16 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (write_domain != 0 && read_domains != write_domain) return -EINVAL; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } + obj_priv = to_intel_bo(obj); intel_mark_busy(dev, obj); @@ -1139,6 +1142,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1157,14 +1161,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } /* Pinned buffers may be scanout, so flush the cache */ @@ -1172,6 +1176,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, i915_gem_object_flush_cpu_write_domain(obj); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1469,33 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; - } + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } - if (!obj_priv->mmap_offset) { ret = i915_gem_create_mmap_offset(obj); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } args->offset = obj_priv->mmap_offset; @@ -1506,17 +1505,15 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, */ if (!obj_priv->agp_mem) { ret = i915_gem_object_bind_to_gtt(obj, 0); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } static void @@ -4100,44 +4097,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = 
-ENOENT; + goto unlock; } obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } /* XXX - flush the CPU caches for pinned objects @@ -4145,10 +4134,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, */ i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj_priv->gtt_offset; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } int @@ -4160,27 +4150,22 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } - obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->pin_filp != file_priv) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count--; if (obj_priv->user_pin_count == 0) { @@ -4188,9 +4173,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, i915_gem_object_unpin(obj); } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4202,25 +4189,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", - args->handle); - return -ENOENT; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; + ret = -ENOENT; + goto unlock; } + obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. 
*/ - obj_priv = to_intel_bo(obj); args->busy = obj_priv->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this @@ -4244,8 +4228,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4272,26 +4257,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->pin_count) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - - DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->madv != __I915_MADV_PURGED) @@ -4304,10 +4283,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, args->retained = obj_priv->madv != __I915_MADV_PURGED; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, From 9af90d19f8a166694753b3f0558d3a8bcd66c0b5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 17 Oct 2010 10:01:56 +0100 Subject: [PATCH 448/476] drm/i915: cache the last object lookup during pin_and_relocate() The most frequent relocation within a batchbuffer is a contiguous sequence of vertex buffer relocations, for which we can virtually eliminate the drm_gem_object_lookup() overhead by caching the last handle to object translation. In doing so we refactor the pin and relocate retry loop out of do_execbuffer into its own helper function and so improve the error paths. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 315 +++++++++++++++----------------- 1 file changed, 144 insertions(+), 171 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 34a07fc20513..f6a615ea3025 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2152,6 +2152,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; + obj_priv->gtt_offset = 0; if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2645,12 +2646,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) search_free: free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, obj->size, alignment, 0); - if (free_space != NULL) { + if (free_space != NULL) obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment); - if (obj_priv->gtt_space != NULL) - obj_priv->gtt_offset = obj_priv->gtt_space->start; - } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
@@ -2693,7 +2691,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) obj_priv->agp_mem = drm_agp_bind_pages(dev, obj_priv->pages, obj->size >> PAGE_SHIFT, - obj_priv->gtt_offset, + obj_priv->gtt_space->start, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { i915_gem_object_put_pages(obj); @@ -2718,6 +2716,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + obj_priv->gtt_offset = obj_priv->gtt_space->start; trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); return 0; @@ -3240,74 +3239,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, * Pin an object to the GTT and evaluate the relocations landing in it. */ static int -i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry) +i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_gem_relocation_entry __user *user_relocs; - int i, ret; - bool need_fence; + struct drm_gem_object *target_obj = NULL; + uint32_t target_handle = 0; + int i, ret = 0; - need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - - /* Check fence reg constraints and rebind if necessary */ - if (need_fence && - !i915_gem_object_fence_offset_ok(obj, - obj_priv->tiling_mode)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; - } - - /* Choose the GTT offset for our buffer and put it there. */ - ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); - if (ret) - return ret; - - /* - * Pre-965 chips need a fence register set up in order to - * properly handle blits to/from tiled surfaces. - */ - if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj, true); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } - - dev_priv->fence_regs[obj_priv->fence_reg].gpu = true; - } - - entry->offset = obj_priv->gtt_offset; - - /* Apply the relocations, using the GTT aperture to avoid cache - * flushing requirements. 
- */ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; for (i = 0; i < entry->relocation_count; i++) { struct drm_i915_gem_relocation_entry reloc; - struct drm_gem_object *target_obj; - struct drm_i915_gem_object *target_obj_priv; + uint32_t target_offset; - ret = __copy_from_user_inatomic(&reloc, - user_relocs+i, - sizeof(reloc)); - if (ret) { - i915_gem_object_unpin(obj); - return -EFAULT; + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) { + ret = -EFAULT; + break; } - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc.target_handle); - if (target_obj == NULL) { - i915_gem_object_unpin(obj); - return -ENOENT; + if (reloc.target_handle != target_handle) { + drm_gem_object_unreference(target_obj); + + target_obj = drm_gem_object_lookup(dev, file_priv, + reloc.target_handle); + if (target_obj == NULL) { + ret = -ENOENT; + break; + } + + target_handle = reloc.target_handle; } - target_obj_priv = to_intel_bo(target_obj); + target_offset = to_intel_bo(target_obj)->gtt_offset; #if WATCH_RELOC DRM_INFO("%s: obj %p offset %08x target %d " @@ -3319,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc.target_handle, (int) reloc.read_domains, (int) reloc.write_domain, - (int) target_obj_priv->gtt_offset, + (int) target_offset, (int) reloc.presumed_offset, reloc.delta); #endif @@ -3327,12 +3294,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ - if (target_obj_priv->gtt_space == NULL) { + if (target_offset == 0) { DRM_ERROR("No GTT space found for object %d\n", reloc.target_handle); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } /* Validate that the target is in a valid r/w GPU domain */ @@ -3344,9 +3310,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc.offset, reloc.read_domains, reloc.write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } if (reloc.write_domain & I915_GEM_DOMAIN_CPU || reloc.read_domains & I915_GEM_DOMAIN_CPU) { @@ -3357,9 +3322,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc.offset, reloc.read_domains, reloc.write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } if (reloc.write_domain && target_obj->pending_write_domain && reloc.write_domain != target_obj->pending_write_domain) { @@ -3370,40 +3334,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc.offset, reloc.write_domain, target_obj->pending_write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; + target_obj->pending_write_domain = reloc.write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. */ - if (target_obj_priv->gtt_offset == reloc.presumed_offset) { - drm_gem_object_unreference(target_obj); + if (target_offset == reloc.presumed_offset) continue; - } /* Check that the relocation address is valid... 
*/ - if (reloc.offset > obj->size - 4) { + if (reloc.offset > obj->base.size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc.target_handle, - (int) reloc.offset, (int) obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + (int) reloc.offset, (int) obj->base.size); + ret = -EINVAL; + break; } if (reloc.offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", obj, reloc.target_handle, (int) reloc.offset); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } /* and points to somewhere within the target object. */ @@ -3412,33 +3371,28 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "obj %p target %d delta %d size %d.\n", obj, reloc.target_handle, (int) reloc.delta, (int) target_obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } - reloc.delta += target_obj_priv->gtt_offset; - if (obj->write_domain == I915_GEM_DOMAIN_CPU) { + reloc.delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { uint32_t page_offset = reloc.offset & ~PAGE_MASK; char *vaddr; - vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0); + vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0); *(uint32_t *)(vaddr + page_offset) = reloc.delta; kunmap_atomic(vaddr, KM_USER0); } else { uint32_t __iomem *reloc_entry; void __iomem *reloc_page; - int ret; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; - } + ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + if (ret) + break; /* Map the page containing the relocation we're going to perform. */ - reloc.offset += obj_priv->gtt_offset; + reloc.offset += obj->gtt_offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, reloc.offset & PAGE_MASK, KM_USER0); @@ -3447,8 +3401,74 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, iowrite32(reloc.delta, reloc_entry); io_mapping_unmap_atomic(reloc_page, KM_USER0); } + } - drm_gem_object_unreference(target_obj); + drm_gem_object_unreference(target_obj); + return ret; +} + +static int +i915_gem_execbuffer_pin(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i, retry; + + /* attempt to pin all of the buffers into the GTT */ + for (retry = 0; retry < 2; retry++) { + ret = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); + bool need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ + if (need_fence && + !i915_gem_object_fence_offset_ok(&obj->base, + obj->tiling_mode)) { + ret = i915_gem_object_unbind(&obj->base); + if (ret) + break; + } + + ret = i915_gem_object_pin(&obj->base, entry->alignment); + if (ret) + break; + + /* + * Pre-965 chips need a fence register set up in order + * to properly handle blits to/from tiled surfaces. 
+ */ + if (need_fence) { + ret = i915_gem_object_get_fence_reg(&obj->base, true); + if (ret) { + i915_gem_object_unpin(&obj->base); + break; + } + + dev_priv->fence_regs[obj->fence_reg].gpu = true; + } + + entry->offset = obj->gtt_offset; + } + + while (i--) + i915_gem_object_unpin(object_list[i]); + + if (ret == 0) + break; + + if (ret != -ENOSPC || retry) + return ret; + + ret = i915_gem_evict_everything(dev); + if (ret) + return ret; } return 0; @@ -3551,7 +3571,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv, + struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec_list) { @@ -3561,9 +3581,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_request *request = NULL; - int ret, i, pinned = 0; + int ret, i, flips; uint64_t exec_offset; - int pin_tries, flips; struct intel_ring_buffer *ring = NULL; @@ -3639,7 +3658,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Look up object handles */ for (i = 0; i < args->buffer_count; i++) { - object_list[i] = drm_gem_object_lookup(dev, file_priv, + object_list[i] = drm_gem_object_lookup(dev, file, exec_list[i].handle); if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", @@ -3662,63 +3681,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, obj_priv->in_execbuffer = true; } - /* Pin and relocate */ - for (pin_tries = 0; ; pin_tries++) { - ret = 0; + /* Move the objects en-masse into the GTT, evicting if necessary. */ + ret = i915_gem_execbuffer_pin(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) + goto err; - for (i = 0; i < args->buffer_count; i++) { - object_list[i]->pending_read_domains = 0; - object_list[i]->pending_write_domain = 0; - ret = i915_gem_object_pin_and_relocate(object_list[i], - file_priv, - &exec_list[i]); - if (ret) - break; - pinned = i + 1; - } - /* success */ - if (ret == 0) - break; - - /* error other than GTT full, or we've already tried again */ - if (ret != -ENOSPC || pin_tries >= 1) { - if (ret != -ERESTARTSYS) { - unsigned long long total_size = 0; - int num_fences = 0; - for (i = 0; i < args->buffer_count; i++) { - obj_priv = to_intel_bo(object_list[i]); - - total_size += object_list[i]->size; - num_fences += - exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - } - DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", - pinned+1, args->buffer_count, - total_size, num_fences, - ret); - DRM_ERROR("%u objects [%u pinned, %u GTT], " - "%zu object bytes [%zu pinned], " - "%zu /%zu gtt bytes\n", - dev_priv->mm.object_count, - dev_priv->mm.pin_count, - dev_priv->mm.gtt_count, - dev_priv->mm.object_memory, - dev_priv->mm.pin_memory, - dev_priv->mm.gtt_memory, - dev_priv->mm.gtt_total); - } - goto err; - } - - /* unpin all of our buffers */ - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - pinned = 0; - - /* evict everyone we can from the aperture */ - ret = i915_gem_evict_everything(dev); - if (ret && ret != -ENOSPC) + /* The objects are in their final locations, apply the relocations. 
*/ + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); + if (ret) goto err; } @@ -3731,9 +3707,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* Sanity check the batch buffer, prior to moving objects */ - exec_offset = exec_list[args->buffer_count - 1].offset; - ret = i915_gem_check_execbuffer (args, exec_offset); + /* Sanity check the batch buffer */ + exec_offset = to_intel_bo(batch_obj)->gtt_offset; + ret = i915_gem_check_execbuffer(args, exec_offset); if (ret != 0) { DRM_ERROR("execbuf with invalid offset/length\n"); goto err; @@ -3761,7 +3737,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); #endif - i915_gem_flush(dev, file_priv, + i915_gem_flush(dev, file, dev->invalidate_domains, dev->flush_domains, dev_priv->mm.flush_rings); @@ -3846,13 +3822,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_object_move_to_active(obj, ring); } - i915_add_request(dev, file_priv, request, ring); + i915_add_request(dev, file, request, ring); request = NULL; err: - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - for (i = 0; i < args->buffer_count; i++) { if (object_list[i]) { obj_priv = to_intel_bo(object_list[i]); From 87acb0a550694ff1a7725ea3a73b80d8ccf56180 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 19 Oct 2010 10:13:00 +0100 Subject: [PATCH 449/476] drm/i915: Simplify most HAS_BSD() checks ... by always initialising the empty ringbuffer it is always then safe to check whether it is active. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 10 +++---- drivers/gpu/drm/i915/i915_gem.c | 43 ++++++++++----------------- drivers/gpu/drm/i915/i915_gem_evict.c | 6 ++-- 3 files changed, 22 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 35d121c70a1f..1ffeb1c5e7c4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -132,8 +132,7 @@ static int i915_dma_cleanup(struct drm_device * dev) mutex_lock(&dev->struct_mutex); intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -1199,9 +1198,6 @@ static int i915_load_modeset_init(struct drm_device *dev, /* Basic memrange allocator for stolen space (aka mm.vram) */ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); - /* We're off and running w/KMS */ - dev_priv->mm.suspended = 0; - /* Let GEM Manage from end of prealloc space to end of aperture. * * However, leave one page at the end still bound to the scratch page. 
@@ -1271,6 +1267,10 @@ static int i915_load_modeset_init(struct drm_device *dev, goto cleanup_irq; drm_kms_helper_poll_init(dev); + + /* We're off and running w/KMS */ + dev_priv->mm.suspended = 0; + return 0; cleanup_irq: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f6a615ea3025..56153685d145 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1795,8 +1795,7 @@ void i915_gem_reset(struct drm_device *dev) int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); /* Remove anything from the flushing lists. The GPU cache is likely * to be lost on reset along with the data, so simply move the @@ -1918,8 +1917,7 @@ i915_gem_retire_requests(struct drm_device *dev) } i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); } static void @@ -1942,8 +1940,7 @@ i915_gem_retire_work_handler(struct work_struct *work) if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || - (HAS_BSD(dev) && - !list_empty(&dev_priv->bsd_ring.request_list)))) + !list_empty(&dev_priv->bsd_ring.request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -2181,8 +2178,7 @@ i915_gpu_idle(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) || - list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list)); if (lists_empty) return 0; @@ -2191,11 +2187,9 @@ i915_gpu_idle(struct drm_device *dev) if (ret) return ret; - if (HAS_BSD(dev)) { - ret = i915_ring_idle(dev, &dev_priv->bsd_ring); - if (ret) - return ret; - } + ret = i915_ring_idle(dev, &dev_priv->bsd_ring); + if (ret) + return ret; return 0; } @@ -4349,10 +4343,7 @@ i915_gem_idle(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.suspended || - (dev_priv->render_ring.gem_object == NULL) || - (HAS_BSD(dev) && - dev_priv->bsd_ring.gem_object == NULL)) { + if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4491,8 +4482,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); } @@ -4522,11 +4512,11 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); + BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4582,10 +4572,8 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->render_ring.active_list); 
INIT_LIST_HEAD(&dev_priv->render_ring.request_list); - if (HAS_BSD(dev)) { - INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); - INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); - } + INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); + INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, @@ -4848,9 +4836,8 @@ i915_gpu_is_active(struct drm_device *dev) int lists_empty; lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list); - if (HAS_BSD(dev)) - lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); + list_empty(&dev_priv->render_ring.active_list) && + list_empty(&dev_priv->bsd_ring.active_list); return !lists_empty; } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3d7fbf32bb18..d2733a1e2bcc 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -215,8 +215,7 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list)); if (lists_empty) return -ENOSPC; @@ -234,8 +233,7 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list)); BUG_ON(!lists_empty); return 0; From 69dc4987cbe5fe70ae1c2a08906d431d53cdd242 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 19 Oct 2010 10:36:51 +0100 Subject: [PATCH 450/476] drm/i915: Track objects in global active list (as well as per-ring) To handle retirements, we need per-ring tracking of active objects. To handle evictions, we need global tracking of active objects. As we enable more rings, rebuilding the global list from the individual per-ring lists quickly grows tiresome and overly complicated. Tracking the active objects in two lists is the lesser of two evils. 
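As a minimal sketch of the bookkeeping this introduces (simplified stand-in structures; locking, seqno handling and the flushing/pinned cases are omitted; the real fields are drm_i915_gem_object::mm_list and ::ring_list as in the hunks below):

#include <linux/list.h>

struct sketch_obj {
	struct list_head mm_list;	/* link on the global mm.active_list */
	struct list_head ring_list;	/* link on the ring's active_list    */
};

struct sketch_mm {
	struct list_head active_list;	/* walked by the eviction code  */
	struct list_head inactive_list;
};

struct sketch_ring {
	struct list_head active_list;	/* walked by request retirement */
};

/* Becoming active: the object goes on BOTH lists. */
static void sketch_move_to_active(struct sketch_obj *obj,
				  struct sketch_mm *mm,
				  struct sketch_ring *ring)
{
	list_move_tail(&obj->mm_list, &mm->active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);
}

/* Going inactive: drop the per-ring link, move the global link. */
static void sketch_move_to_inactive(struct sketch_obj *obj,
				    struct sketch_mm *mm)
{
	list_move_tail(&obj->mm_list, &mm->inactive_list);
	list_del_init(&obj->ring_list);
}

Eviction then only ever walks the single mm.active_list, whatever the number of rings, which is what lets the i915_for_each_active_object() iterator be deleted from i915_gem_evict.c below.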
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 23 ++++------- drivers/gpu/drm/i915/i915_drv.h | 14 ++++++- drivers/gpu/drm/i915/i915_gem.c | 47 ++++++++++++--------- drivers/gpu/drm/i915/i915_gem_evict.c | 59 +++------------------------ drivers/gpu/drm/i915/i915_irq.c | 11 ++--- 5 files changed, 58 insertions(+), 96 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d598070fb279..f9e3295f0457 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -41,8 +41,7 @@ #if defined(CONFIG_DEBUG_FS) enum { - RENDER_LIST, - BSD_LIST, + ACTIVE_LIST, FLUSHING_LIST, INACTIVE_LIST, PINNED_LIST, @@ -125,6 +124,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + if (obj->ring != NULL) + seq_printf(m, " (%s)", obj->ring->name); } static int i915_gem_object_list_info(struct seq_file *m, void *data) @@ -143,13 +144,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return ret; switch (list) { - case RENDER_LIST: - seq_printf(m, "Render:\n"); - head = &dev_priv->render_ring.active_list; - break; - case BSD_LIST: - seq_printf(m, "BSD:\n"); - head = &dev_priv->bsd_ring.active_list; + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: seq_printf(m, "Inactive:\n"); @@ -173,7 +170,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj_priv, head, list) { + list_for_each_entry(obj_priv, head, mm_list) { seq_printf(m, " "); describe_obj(m, obj_priv); seq_printf(m, "\n"); @@ -460,8 +457,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) if (ret) return ret; - list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, - list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { obj = &obj_priv->base; if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { seq_printf(m, "--- gtt_offset = 0x%08x\n", @@ -1020,8 +1016,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) static struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST}, - {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST}, + {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 84e33aeececd..817d8be6ff49 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -547,6 +547,17 @@ typedef struct drm_i915_private { */ struct list_head shrink_list; + /** + * List of objects currently involved in rendering. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. 
+ */ + struct list_head active_list; + /** * List of objects which are not in the ringbuffer but which * still have a write_domain which needs to be flushed before @@ -714,7 +725,8 @@ struct drm_i915_gem_object { struct drm_mm_node *gtt_space; /** This object's place on the active/flushing/inactive lists */ - struct list_head list; + struct list_head ring_list; + struct list_head mm_list; /** This object's place on GPU write list */ struct list_head gpu_write_list; /** This object's place on eviction list */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 56153685d145..6e85496f9164 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1139,7 +1139,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* Maintain LRU order of "inactive" objects */ if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); drm_gem_object_unreference(obj); unlock: @@ -1271,7 +1271,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } if (i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + page_offset; @@ -1565,6 +1565,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t seqno = i915_gem_next_request_seqno(dev, ring); @@ -1578,7 +1579,8 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, } /* Move from whatever list we were on to the tail of execution. 
*/ - list_move_tail(&obj_priv->list, &ring->active_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj_priv->ring_list, &ring->active_list); obj_priv->last_rendering_seqno = seqno; } @@ -1590,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); + list_del_init(&obj_priv->ring_list); obj_priv->last_rendering_seqno = 0; } @@ -1629,9 +1632,10 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (obj_priv->pin_count != 0) - list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); else - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + list_del_init(&obj_priv->ring_list); BUG_ON(!list_empty(&obj_priv->gpu_write_list)); @@ -1780,7 +1784,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, obj_priv = list_first_entry(&ring->active_list, struct drm_i915_gem_object, - list); + ring_list); obj_priv->base.write_domain = 0; list_del_init(&obj_priv->gpu_write_list); @@ -1804,7 +1808,7 @@ void i915_gem_reset(struct drm_device *dev) while (!list_empty(&dev_priv->mm.flushing_list)) { obj_priv = list_first_entry(&dev_priv->mm.flushing_list, struct drm_i915_gem_object, - list); + mm_list); obj_priv->base.write_domain = 0; list_del_init(&obj_priv->gpu_write_list); @@ -1816,7 +1820,7 @@ void i915_gem_reset(struct drm_device *dev) */ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + mm_list) { obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } @@ -1876,7 +1880,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, obj_priv = list_first_entry(&ring->active_list, struct drm_i915_gem_object, - list); + ring_list); if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) break; @@ -1912,7 +1916,7 @@ i915_gem_retire_requests(struct drm_device *dev) */ list_for_each_entry_safe(obj_priv, tmp, &dev_priv->mm.deferred_free_list, - list) + mm_list) i915_gem_free_object_tail(&obj_priv->base); } @@ -2145,7 +2149,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->pages_refcount); i915_gem_info_remove_gtt(dev_priv, obj->size); - list_del_init(&obj_priv->list); + list_del_init(&obj_priv->mm_list); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -2700,7 +2704,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); i915_gem_info_add_gtt(dev_priv, obj->size); /* Assert that the object is not currently in any GPU domain. 
As it @@ -4022,7 +4026,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (obj_priv->pin_count == 1) { i915_gem_info_add_pin(dev_priv, obj->size); if (!obj_priv->active) - list_move_tail(&obj_priv->list, + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); } @@ -4048,7 +4052,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) */ if (obj_priv->pin_count == 0) { if (!obj_priv->active) - list_move_tail(&obj_priv->list, + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); i915_gem_info_remove_pin(dev_priv, obj->size); } @@ -4280,7 +4284,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, obj->agp_type = AGP_USER_MEMORY; obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj->list); + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; @@ -4303,7 +4308,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->list, + list_move(&obj_priv->mm_list, &dev_priv->mm.deferred_free_list); return; } @@ -4511,6 +4516,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } + BUG_ON(!list_empty(&dev_priv->mm.active_list)); BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); @@ -4564,6 +4570,7 @@ i915_gem_load(struct drm_device *dev) int i; drm_i915_private_t *dev_priv = dev->dev_private; + INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); @@ -4859,7 +4866,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) if (mutex_trylock(&dev->struct_mutex)) { list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + mm_list) cnt++; mutex_unlock(&dev->struct_mutex); } @@ -4885,7 +4892,7 @@ rescan: list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (i915_gem_object_is_purgeable(obj_priv)) { i915_gem_object_unbind(&obj_priv->base); if (--nr_to_scan <= 0) @@ -4914,7 +4921,7 @@ rescan: list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (nr_to_scan > 0) { i915_gem_object_unbind(&obj_priv->base); nr_to_scan--; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index d2733a1e2bcc..70db2f1ee369 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -31,49 +31,6 @@ #include "i915_drv.h" #include "i915_drm.h" -static struct drm_i915_gem_object * -i915_gem_next_active_object(struct drm_device *dev, - struct list_head **render_iter, - struct list_head **bsd_iter) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; - - if (*render_iter != &dev_priv->render_ring.active_list) - render_obj = list_entry(*render_iter, - struct drm_i915_gem_object, - list); - - if (HAS_BSD(dev)) { - if (*bsd_iter != &dev_priv->bsd_ring.active_list) - bsd_obj = list_entry(*bsd_iter, - struct drm_i915_gem_object, - list); - - if (render_obj == NULL) { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - - if (bsd_obj == NULL) { - *render_iter = (*render_iter)->next; - return render_obj; - } - - /* XXX can we handle seqno 
wrapping? */ - if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { - *render_iter = (*render_iter)->next; - return render_obj; - } else { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - } else { - *render_iter = (*render_iter)->next; - return render_obj; - } -} - static bool mark_free(struct drm_i915_gem_object *obj_priv, struct list_head *unwind) @@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv, return drm_mm_scan_add_block(obj_priv->gtt_space); } -#define i915_for_each_active_object(OBJ, R, B) \ - *(R) = dev_priv->render_ring.active_list.next; \ - *(B) = dev_priv->bsd_ring.active_list.next; \ - while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) - int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct drm_i915_gem_object *obj_priv; - struct list_head *render_iter, *bsd_iter; int ret = 0; i915_gem_retire_requests(dev); @@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { if (mark_free(obj_priv, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ if (obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { if (obj_priv->pin_count) continue; if (mark_free(obj_priv, &unwind_list)) goto found; } - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { if (! obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -251,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev) obj = &list_first_entry(&dev_priv->mm.inactive_list, struct drm_i915_gem_object, - list)->base; + mm_list)->base; ret = i915_gem_object_unbind(obj); if (ret != 0) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1e30c250140b..f94cd7ffd74d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -608,9 +608,7 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { - + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -627,7 +625,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* Scan the other lists for completeness for those bizarre errors. 
*/ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -645,7 +643,7 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -686,8 +684,7 @@ static void i915_capture_error_state(struct drm_device *dev) if (error->active_bo) { int i = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; error->active_bo[i].size = obj->size; From b5dc608c98d929abbf2fe932ed07b3c868d83342 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 20 Oct 2010 20:59:57 +0100 Subject: [PATCH 451/476] drm/i915: Copy the updated reloc->presumed_offset back to the user If the userspace driver is using a constant relocation array with a static buffer, they will pass the same relocation array back to the kernel. So we *do* need to update the presumed offset value in those relocations to reflect the current object so that they remain correct with future batchbuffers and we avoid the necessity of having to suspend execution and perform redundant relocations. Fixes the regression introduced by 12f889c for applications using absolute addressing on trees of buffer (i.e. the current consumers of libdrm_intel.so). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30996 Reported-by: Wang, Jinjin Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6e85496f9164..5041ebe3fdf9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3399,6 +3399,15 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, iowrite32(reloc.delta, reloc_entry); io_mapping_unmap_atomic(reloc_page, KM_USER0); } + + /* and update the user's relocation entry */ + reloc.presumed_offset = target_offset; + if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) { + ret = -EFAULT; + break; + } } drm_gem_object_unreference(target_obj); @@ -3560,6 +3569,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (!access_ok(VERIFY_READ, ptr, length)) return -EFAULT; + /* we may also need to update the presumed offsets */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + if (fault_in_pages_readable(ptr, length)) return -EFAULT; } From 40d857bba2915a4e8d82f44744a186bfdd1a46ea Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 19 Oct 2010 09:01:00 +0200 Subject: [PATCH 452/476] drm/ttm: Avoid using the ttm_mem_type_manager::put_locked function Release the lru spinlock early. 
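In other words, all the list surgery stays under the lru spinlock, but the lock is now dropped before the memtype cleanup runs, so ttm_tt_unbind()/ttm_tt_destroy() no longer execute in atomic context and the put_node_locked() manager hook becomes unnecessary. A rough sketch of the new ordering, condensed from the hunks below (reservation and error handling omitted):

spin_lock(&glob->lru_lock);
put_count = ttm_bo_del_from_lru(bo);	/* list manipulation only under the lock */
spin_unlock(&glob->lru_lock);		/* released early ...                    */

ttm_bo_cleanup_memtype_use(bo);		/* ... so ttm_tt teardown and the memory */
					/* node release run outside the spinlock */
while (put_count--)
	kref_put(&bo->list_kref, ttm_bo_ref_bug);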
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 32 ++++++---------------------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 10 --------- include/drm/ttm/ttm_bo_driver.h | 2 -- 3 files changed, 6 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9f2eed520fc3..4a73f401644d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -434,36 +434,25 @@ out_err: } /** - * Call bo::reserved and with the lru lock held. + * Call bo::reserved. * Will release GPU memory type usage on destruction. - * This is the place to put in driver specific hooks. - * Will release the bo::reserved lock and the - * lru lock on exit. + * This is the place to put in driver specific hooks to release + * driver private resources. + * Will release the bo::reserved lock. */ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { - struct ttm_bo_global *glob = bo->glob; - if (bo->ttm) { - - /** - * Release the lru_lock, since we don't want to have - * an atomic requirement on ttm_tt[unbind|destroy]. - */ - - spin_unlock(&glob->lru_lock); ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; - spin_lock(&glob->lru_lock); } - ttm_bo_mem_put_locked(bo, &bo->mem); + ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); - spin_unlock(&glob->lru_lock); } @@ -528,7 +517,7 @@ retry: list_del_init(&bo->ddestroy); ++put_count; } - + spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); while (put_count--) @@ -784,15 +773,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) } EXPORT_SYMBOL(ttm_bo_mem_put); -void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; - - if (mem->mm_node) - (*man->func->put_node_locked)(man, mem); -} -EXPORT_SYMBOL(ttm_bo_mem_put_locked); - /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 35c97b20bdae..7410c190c891 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -90,15 +90,6 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, } } -static void ttm_bo_man_put_node_locked(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) -{ - if (mem->mm_node) { - drm_mm_put_block(mem->mm_node); - mem->mm_node = NULL; - } -} - static int ttm_bo_man_init(struct ttm_mem_type_manager *man, unsigned long p_size) { @@ -152,7 +143,6 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = { ttm_bo_man_takedown, ttm_bo_man_get_node, ttm_bo_man_put_node, - ttm_bo_man_put_node_locked, ttm_bo_man_debug }; EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index d0ff529fedde..d01b4ddbdc56 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -214,8 +214,6 @@ struct ttm_mem_type_manager_func { struct ttm_mem_reg *mem); void (*put_node)(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem); - void (*put_node_locked)(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem); void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); }; From e1efc9b6ac22c605fd326b3f6af9b393325d43b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 19 Oct 2010 09:01:01 +0200 Subject: [PATCH 453/476] drm/ttm: Optimize delayed buffer destruction This commit replaces the ttm_bo_cleanup_ref function with two new functions. One for the case where the bo is not yet on the delayed destroy list, and one for the case where the bo was on the delayed destroy list, at least at the time of call. This makes it possible to optimize the two cases somewhat. It also enables the possibility to directly destroy buffers on the delayed delete list when they are about to be evicted or swapped out. Currently they were only evicted / swapped and destruction was left for the delayed buffer destruction thread. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 181 ++++++++++++++++++++++------------- 1 file changed, 112 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4a73f401644d..a1cb783c7131 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -455,100 +455,123 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) wake_up_all(&bo->event_queue); } - -/** - * If bo idle, remove from delayed- and lru lists, and unref. - * If not idle, and already on delayed list, do nothing. - * If not idle, and not on delayed list, put on delayed list, - * up the list_kref and schedule a delayed list check. - */ - -static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) +static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; - struct ttm_bo_driver *driver = bdev->driver; + struct ttm_bo_driver *driver; + void *sync_obj; + void *sync_obj_arg; + int put_count; int ret; spin_lock(&bo->lock); -retry: - (void) ttm_bo_wait(bo, false, false, !remove_all); - + (void) ttm_bo_wait(bo, false, false, true); if (!bo->sync_obj) { - int put_count; - - spin_unlock(&bo->lock); spin_lock(&glob->lru_lock); - ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); /** - * Someone else has the object reserved. Bail and retry. 
+ * Lock inversion between bo::reserve and bo::lock here, + * but that's OK, since we're only trylocking. */ - if (unlikely(ret == -EBUSY)) { - spin_unlock(&glob->lru_lock); - spin_lock(&bo->lock); - goto requeue; - } + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - /** - * We can re-check for sync object without taking - * the bo::lock since setting the sync object requires - * also bo::reserved. A busy object at this point may - * be caused by another thread starting an accelerated - * eviction. - */ - - if (unlikely(bo->sync_obj)) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); - spin_unlock(&glob->lru_lock); - spin_lock(&bo->lock); - if (remove_all) - goto retry; - else - goto requeue; - } + if (unlikely(ret == -EBUSY)) + goto queue; + spin_unlock(&bo->lock); put_count = ttm_bo_del_from_lru(bo); - if (!list_empty(&bo->ddestroy)) { - list_del_init(&bo->ddestroy); - ++put_count; - } spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); - return 0; - } -requeue: - spin_lock(&glob->lru_lock); - if (list_empty(&bo->ddestroy)) { - void *sync_obj = bo->sync_obj; - void *sync_obj_arg = bo->sync_obj_arg; - - kref_get(&bo->list_kref); - list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); - - if (sync_obj) - driver->sync_obj_flush(sync_obj, sync_obj_arg); - schedule_delayed_work(&bdev->wq, - ((HZ / 100) < 1) ? 1 : HZ / 100); - ret = 0; - + return; } else { + spin_lock(&glob->lru_lock); + } +queue: + sync_obj = bo->sync_obj; + sync_obj_arg = bo->sync_obj_arg; + driver = bdev->driver; + + kref_get(&bo->list_kref); + list_add_tail(&bo->ddestroy, &bdev->ddestroy); + spin_unlock(&glob->lru_lock); + spin_unlock(&bo->lock); + + if (sync_obj) + driver->sync_obj_flush(sync_obj, sync_obj_arg); + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 1 : HZ / 100); +} + +/** + * function ttm_bo_cleanup_refs + * If bo idle, remove from delayed- and lru lists, and unref. + * If not idle, do nothing. + * + * @interruptible Any sleeps should occur interruptibly. + * @no_wait_reserve Never wait for reserve. Return -EBUSY instead. + * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. + */ + +static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait_reserve, + bool no_wait_gpu) +{ + struct ttm_bo_global *glob = bo->glob; + int put_count; + int ret = 0; + +retry: + spin_lock(&bo->lock); + ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); + spin_unlock(&bo->lock); + + if (unlikely(ret != 0)) + return ret; + + spin_lock(&glob->lru_lock); + ret = ttm_bo_reserve_locked(bo, interruptible, + no_wait_reserve, false, 0); + + if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); - ret = -EBUSY; + return ret; } - return ret; + /** + * We can re-check for sync object without taking + * the bo::lock since setting the sync object requires + * also bo::reserved. A busy object at this point may + * be caused by another thread recently starting an accelerated + * eviction. 
+ */ + + if (unlikely(bo->sync_obj)) { + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + spin_unlock(&glob->lru_lock); + goto retry; + } + + put_count = ttm_bo_del_from_lru(bo); + list_del_init(&bo->ddestroy); + ++put_count; + + spin_unlock(&glob->lru_lock); + ttm_bo_cleanup_memtype_use(bo); + + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + + return 0; } /** @@ -580,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) } spin_unlock(&glob->lru_lock); - ret = ttm_bo_cleanup_refs(entry, remove_all); + ret = ttm_bo_cleanup_refs(entry, false, !remove_all, + !remove_all); kref_put(&entry->list_kref, ttm_bo_release_list); entry = nentry; @@ -623,7 +647,7 @@ static void ttm_bo_release(struct kref *kref) bo->vm_node = NULL; } write_unlock(&bdev->vm_lock); - ttm_bo_cleanup_refs(bo, false); + ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); write_lock(&bdev->vm_lock); } @@ -731,6 +755,18 @@ retry: bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); kref_get(&bo->list_kref); + if (!list_empty(&bo->ddestroy)) { + spin_unlock(&glob->lru_lock); + ret = ttm_bo_cleanup_refs(bo, interruptible, + no_wait_reserve, no_wait_gpu); + kref_put(&bo->list_kref, ttm_bo_release_list); + + if (likely(ret == 0 || ret == -ERESTARTSYS)) + return ret; + + goto retry; + } + ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); if (unlikely(ret == -EBUSY)) { @@ -1754,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) struct ttm_buffer_object, swap); kref_get(&bo->list_kref); + if (!list_empty(&bo->ddestroy)) { + spin_unlock(&glob->lru_lock); + (void) ttm_bo_cleanup_refs(bo, false, false, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + continue; + } + /** * Reserve buffer. Since we unlock while sleeping, we need * to re-check that nobody removed us from the swap-list while From c717966744e618af76aa5d7fe6cc44dba487bdc6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 21 Oct 2010 18:51:09 +0100 Subject: [PATCH 454/476] drm/i915/ringbuffer: Fix emit batch buffer regression from 8187a2b In commit 8187a2b, the number of dwords used in the ringbuffer for executing the batch buffer was erroneously changed from 2 to 4. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 89004a622f49..f1e0538b21f5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -476,7 +476,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_emit(dev, ring, exec_start + exec_len - 4); intel_ring_emit(dev, ring, 0); } else { - intel_ring_begin(dev, ring, 4); + intel_ring_begin(dev, ring, 2); if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | (2 << 6) From e36c1cd7292efcb8daca26cd6331481736544742 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 21 Oct 2010 19:00:02 +0100 Subject: [PATCH 455/476] drm/i915/ringbuffer: Remove broken intel_fill_struct() ... before someone tries to use it. The code both calls intel_ring_begin/advance() and open-codes the bookkeeping performed by those two functions. 
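For reference, the supported way to put dwords on a ring is the begin/emit/advance triple used throughout intel_ringbuffer.c (the same calls touched by the previous patch); intel_fill_struct() called intel_ring_begin()/intel_ring_advance() and then also adjusted ring->tail and ring->space by hand after its memcpy, duplicating the accounting those functions already perform. A sketch of the idiom, with cmd_dword_0/1 standing in for real command words:

intel_ring_begin(dev, ring, 2);			/* make room for two dwords        */
intel_ring_emit(dev, ring, cmd_dword_0);	/* each emit bumps ring->tail      */
intel_ring_emit(dev, ring, cmd_dword_1);
intel_ring_advance(dev, ring);			/* hand the new tail to the ring   */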
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 15 --------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ---- 2 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f1e0538b21f5..8da5ff790da3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -733,21 +733,6 @@ void intel_ring_advance(struct drm_device *dev, ring->set_tail(dev, ring, ring->tail); } -void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len) -{ - unsigned int *virt = ring->virtual_start + ring->tail; - BUG_ON((len&~(4-1)) != 0); - intel_ring_begin(dev, ring, len/4); - memcpy(virt, data, len); - ring->tail += len; - ring->tail &= ring->size - 1; - ring->space -= len; - intel_ring_advance(dev, ring); -} - static const struct intel_ring_buffer render_ring = { .name = "render ring", .id = RING_RENDER, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9725f783db20..5b37ff3a6949 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -116,10 +116,6 @@ static inline void intel_ring_emit(struct drm_device *dev, ring->tail += 4; } -void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len); void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring); From 549f7365820a212a1cfd0871d377b1ad0d1e5723 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 19 Oct 2010 11:19:32 +0100 Subject: [PATCH 456/476] drm/i915: Enable SandyBridge blitter ring Based on an original patch by Zhenyu Wang, this initializes the BLT ring for SandyBridge and enables support for user execbuffers. 
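A rough userspace sketch of how a client would drive the new interface (hypothetical helper names, simplified error handling, header paths may vary; the flag and parameter values are the ones added to i915_drm.h later in this patch): query I915_PARAM_HAS_BLT, then select the blitter with I915_EXEC_BLT in the execbuffer2 flags.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int blt_ring_available(int fd)
{
        struct drm_i915_getparam gp;
        int value = 0;

        memset(&gp, 0, sizeof(gp));
        gp.param = I915_PARAM_HAS_BLT;
        gp.value = &value;

        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;
        return value;
}

static int submit_on_blt(int fd, struct drm_i915_gem_exec_object2 *objects,
                         unsigned int count, unsigned int batch_len)
{
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (uintptr_t)objects;
        execbuf.buffer_count = count;
        execbuf.batch_len = batch_len;
        execbuf.flags = I915_EXEC_BLT;  /* route the batch to the blitter ring */

        return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

Existing userspace that leaves the ring-selection bits at zero still lands on the render ring via I915_EXEC_DEFAULT, so the encoding added below stays backwards compatible.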
Cc: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 2 + drivers/gpu/drm/i915/i915_dma.c | 4 ++ drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 3 + drivers/gpu/drm/i915/i915_gem.c | 55 +++++++++++++-- drivers/gpu/drm/i915/i915_gem_evict.c | 6 +- drivers/gpu/drm/i915/i915_irq.c | 64 ++++++++++------- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_ringbuffer.c | 92 +++++++++++++++++-------- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 + include/drm/i915_drm.h | 6 +- 11 files changed, 175 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9e3295f0457..d521de3e0680 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -80,6 +80,8 @@ static int i915_capabilities(struct seq_file *m, void *data) B(has_overlay); B(overlay_needs_physical); B(supports_tv); + B(has_bsd_ring); + B(has_blt_ring); #undef B return 0; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1ffeb1c5e7c4..1851ca4087f9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -133,6 +133,7 @@ static int i915_dma_cleanup(struct drm_device * dev) mutex_lock(&dev->struct_mutex); intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -763,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BSD: value = HAS_BSD(dev); break; + case I915_PARAM_HAS_BLT: + value = HAS_BLT(dev); + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c3decb2fef4b..90f9c3e3fee3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -158,12 +158,14 @@ static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 817d8be6ff49..a9a0e220176e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -216,6 +216,7 @@ struct intel_device_info { u8 overlay_needs_physical : 1; u8 supports_tv : 1; u8 has_bsd_ring : 1; + u8 has_blt_ring : 1; }; enum no_fbc_reason { @@ -255,6 +256,7 @@ typedef struct drm_i915_private { struct pci_dev *bridge_dev; struct intel_ring_buffer render_ring; struct intel_ring_buffer bsd_ring; + struct intel_ring_buffer blt_ring; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; @@ -1300,6 +1302,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5041ebe3fdf9..c3398d396419 100644 --- a/drivers/gpu/drm/i915/i915_gem.c 
+++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1800,6 +1800,7 @@ void i915_gem_reset(struct drm_device *dev) i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); /* Remove anything from the flushing lists. The GPU cache is likely * to be lost on reset along with the data, so simply move the @@ -1922,6 +1923,7 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); } static void @@ -1944,7 +1946,8 @@ i915_gem_retire_work_handler(struct work_struct *work) if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || - !list_empty(&dev_priv->bsd_ring.request_list))) + !list_empty(&dev_priv->bsd_ring.request_list) || + !list_empty(&dev_priv->blt_ring.request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -2063,6 +2066,10 @@ i915_gem_flush(struct drm_device *dev, i915_gem_flush_ring(dev, file_priv, &dev_priv->bsd_ring, invalidate_domains, flush_domains); + if (flush_rings & RING_BLT) + i915_gem_flush_ring(dev, file_priv, + &dev_priv->blt_ring, + invalidate_domains, flush_domains); } } @@ -2182,7 +2189,8 @@ i915_gpu_idle(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list)); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return 0; @@ -2195,6 +2203,10 @@ i915_gpu_idle(struct drm_device *dev) if (ret) return ret; + ret = i915_ring_idle(dev, &dev_priv->blt_ring); + if (ret) + return ret; + return 0; } @@ -3609,14 +3621,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); #endif - if (args->flags & I915_EXEC_BSD) { + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->render_ring; + break; + case I915_EXEC_BSD: if (!HAS_BSD(dev)) { - DRM_ERROR("execbuf with wrong flag\n"); + DRM_ERROR("execbuf with invalid ring (BSD)\n"); return -EINVAL; } ring = &dev_priv->bsd_ring; - } else { - ring = &dev_priv->render_ring; + break; + case I915_EXEC_BLT: + if (!HAS_BLT(dev)) { + DRM_ERROR("execbuf with invalid ring (BLT)\n"); + return -EINVAL; + } + ring = &dev_priv->blt_ring; + break; + default: + DRM_ERROR("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; } if (args->buffer_count < 1) { @@ -4482,10 +4509,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev) goto cleanup_render_ring; } + if (HAS_BLT(dev)) { + ret = intel_init_blt_ring_buffer(dev); + if (ret) + goto cleanup_bsd_ring; + } + dev_priv->next_seqno = 1; return 0; +cleanup_bsd_ring: + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); cleanup_render_ring: intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); cleanup_pipe_control: @@ -4501,6 +4536,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); } @@ -4532,10 +4568,12 @@ 
i915_gem_entervt_ioctl(struct drm_device *dev, void *data, BUG_ON(!list_empty(&dev_priv->mm.active_list)); BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4594,6 +4632,8 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->render_ring.request_list); INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); + INIT_LIST_HEAD(&dev_priv->blt_ring.active_list); + INIT_LIST_HEAD(&dev_priv->blt_ring.request_list); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, @@ -4857,7 +4897,8 @@ i915_gpu_is_active(struct drm_device *dev) lists_empty = list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list); return !lists_empty; } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 70db2f1ee369..43a4013f53fa 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -166,7 +166,8 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list)); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return -ENOSPC; @@ -184,7 +185,8 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list)); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!lists_empty); return 0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f94cd7ffd74d..237b8bdb5994 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -293,6 +293,19 @@ static void i915_handle_rps_change(struct drm_device *dev) return; } +static void notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 seqno = ring->get_seqno(dev, ring); + ring->irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); + wake_up_all(&ring->irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); +} + static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -300,7 +313,6 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) u32 de_iir, gt_iir, de_ier, pch_iir; u32 hotplug_mask; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; if 
(IS_GEN6(dev)) @@ -332,17 +344,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) { - u32 seqno = render_ring->get_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - wake_up_all(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, - jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); - } + if (gt_iir & GT_PIPE_NOTIFY) + notify_ring(dev, &dev_priv->render_ring); if (gt_iir & bsd_usr_interrupt) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + notify_ring(dev, &dev_priv->bsd_ring); + if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->blt_ring); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -881,6 +888,8 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) wake_up_all(&dev_priv->render_ring.irq_queue); if (HAS_BSD(dev)) wake_up_all(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + wake_up_all(&dev_priv->blt_ring.irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -941,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) unsigned long irqflags; int irq_received; int ret = IRQ_NONE; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; atomic_inc(&dev_priv->irq_received); @@ -1018,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) READ_BREADCRUMB(dev_priv); } - if (iir & I915_USER_INTERRUPT) { - u32 seqno = render_ring->get_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - wake_up_all(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, - jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); - } - + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->render_ring); if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + notify_ring(dev, &dev_priv->bsd_ring); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1358,6 +1358,12 @@ void i915_hangcheck_elapsed(unsigned long data) missed_wakeup = true; } + if (dev_priv->blt_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->blt_ring.irq_queue)) { + wake_up_all(&dev_priv->blt_ring.irq_queue); + missed_wakeup = true; + } + if (missed_wakeup) DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); return; @@ -1443,8 +1449,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); (void) I915_READ(DEIER); - if (IS_GEN6(dev)) - render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT; + if (IS_GEN6(dev)) { + render_mask = + GT_PIPE_NOTIFY | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + } dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; @@ -1454,6 +1464,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (IS_GEN6(dev)) { I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); } I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); @@ -1523,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 error_mask; DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); - if (HAS_BSD(dev)) DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 557f27134d05..c52e209321c1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -263,6 +263,7 @@ #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 +#define BLT_RING_BASE 0x22000 #define RING_TAIL(base) ((base)+0x30) #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) @@ -2561,6 +2562,7 @@ #define GT_USER_INTERRUPT (1 << 0) #define GT_BSD_USER_INTERRUPT (1 << 5) #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) +#define GT_BLT_USER_INTERRUPT (1 << 22) #define GTISR 0x44010 #define GTIMR 0x44014 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8da5ff790da3..a8f408fe4e71 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -383,9 +383,9 @@ static int init_bsd_ring(struct drm_device *dev, } static u32 -bsd_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains) +ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 flush_domains) { u32 seqno; @@ -418,18 +418,18 @@ bsd_ring_put_user_irq(struct drm_device *dev, } static u32 -bsd_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_status_page_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static int -bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; @@ -441,7 +441,6 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, return 0; } - static int render_ring_dispatch_gem_execbuffer(struct drm_device *dev, struct intel_ring_buffer *ring, @@ -758,11 +757,11 @@ static const struct intel_ring_buffer bsd_ring = { .init = init_bsd_ring, .set_tail = ring_set_tail, .flush = bsd_ring_flush, - .add_request = bsd_ring_add_request, - 
.get_seqno = bsd_ring_get_seqno, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, + .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, }; @@ -789,10 +788,10 @@ static void gen6_bsd_ring_set_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static void gen6_bsd_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) +static void gen6_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { intel_ring_begin(dev, ring, 4); intel_ring_emit(dev, ring, MI_FLUSH_DW); @@ -803,11 +802,11 @@ static void gen6_bsd_ring_flush(struct drm_device *dev, } static int -gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; @@ -831,12 +830,42 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .size = 32 * PAGE_SIZE, .init = init_bsd_ring, .set_tail = gen6_bsd_ring_set_tail, - .flush = gen6_bsd_ring_flush, - .add_request = bsd_ring_add_request, - .get_seqno = bsd_ring_get_seqno, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, +}; + +/* Blitter support (SandyBridge+) */ + +static void +blt_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} +static void +blt_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} + +static const struct intel_ring_buffer gen6_blt_ring = { + .name = "blt ring", + .id = RING_BLT, + .mmio_base = BLT_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .set_tail = ring_set_tail, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, + .user_irq_get = blt_ring_get_user_irq, + .user_irq_put = blt_ring_put_user_irq, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, }; int intel_init_render_ring_buffer(struct drm_device *dev) @@ -866,3 +895,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); } + +int intel_init_blt_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->blt_ring = gen6_blt_ring; + + return intel_init_ring_buffer(dev, &dev_priv->blt_ring); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5b37ff3a6949..9e81ff3b39cd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -22,6 +22,7 @@ struct intel_ring_buffer { enum intel_ring_id { RING_RENDER = 0x1, RING_BSD = 0x2, + RING_BLT = 0x4, } id; u32 mmio_base; unsigned long size; @@ -124,6 +125,7 @@ u32 intel_ring_get_seqno(struct drm_device *dev, int intel_init_render_ring_buffer(struct drm_device 
*dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); +int intel_init_blt_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index e41c74facb6a..8c641bed9bbd 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_PAGEFLIPPING 8 #define I915_PARAM_HAS_EXECBUF2 9 #define I915_PARAM_HAS_BSD 10 +#define I915_PARAM_HAS_BLT 11 typedef struct drm_i915_getparam { int param; @@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 { __u32 num_cliprects; /** This is a struct drm_clip_rect *cliprects */ __u64 cliprects_ptr; +#define I915_EXEC_RING_MASK (7<<0) +#define I915_EXEC_DEFAULT (0<<0) #define I915_EXEC_RENDER (1<<0) -#define I915_EXEC_BSD (1<<1) +#define I915_EXEC_BSD (2<<0) +#define I915_EXEC_BLT (3<<0) __u64 flags; __u64 rsvd1; __u64 rsvd2; From f00a3ddf91d596bece5fa31e8ce2e8a3b4c0623b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 21 Oct 2010 14:57:17 +0100 Subject: [PATCH 457/476] drm/i915: IS_IRONLAKE is synonymous with gen == 5 So remove the redundant bit in the capabilities block and s/IS_IRONLAKE/IS_GEN5/. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +-- drivers/gpu/drm/i915/i915_dma.c | 6 +++--- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 11 ++++------- drivers/gpu/drm/i915/i915_gem_tiling.c | 2 +- drivers/gpu/drm/i915/intel_bios.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 8 files changed, 18 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d521de3e0680..7698983577d1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -71,7 +71,6 @@ static int i915_capabilities(struct seq_file *m, void *data) B(is_pineview); B(is_broadwater); B(is_crestline); - B(is_ironlake); B(has_fbc); B(has_rc6); B(has_pipe_cxsr); @@ -795,7 +794,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_IRONLAKE(dev)) + if (IS_GEN5(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1851ca4087f9..7a26f4dd21ae 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -499,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { BEGIN_LP_RING(2); OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); OUT_RING(MI_NOOP); @@ -1995,7 +1995,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } @@ -2019,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_PINEVIEW(dev)) i915_pineview_get_mem_freq(dev); - else if (IS_IRONLAKE(dev)) + 
else if (IS_GEN5(dev)) i915_ironlake_get_mem_freq(dev); /* On the 945G/GM, the chipset reports the MSI capability on the diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 90f9c3e3fee3..8e632110c58f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -143,13 +143,13 @@ static const struct intel_device_info intel_pineview_info = { }; static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .is_ironlake = 1, + .gen = 5, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_ironlake = 1, .is_mobile = 1, + .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a9a0e220176e..cc9cb0dda6fc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -206,7 +206,6 @@ struct intel_device_info { u8 is_pineview : 1; u8 is_broadwater : 1; u8 is_crestline : 1; - u8 is_ironlake : 1; u8 has_fbc : 1; u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; @@ -1292,7 +1291,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) @@ -1314,8 +1312,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) @@ -1327,9 +1325,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) -#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ - IS_GEN6(dev)) -#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) +#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 8c9ffc4768ee..af352de70be1 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_GEN5(dev) || IS_GEN6(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. 
*/ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b9560f3cbb3d..b0b1200ed650 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -265,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_use_ssc = general->enable_ssc; if (dev_priv->lvds_use_ssc) { - if (IS_I85X(dev_priv->dev)) + if (IS_I85X(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) + else if (IS_GEN5(dev) || IS_GEN6(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; else diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cda36b348fe8..e031d82381e5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4152,7 +4152,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_wait_for_vblank(dev, pipe); - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* enable address swizzle for tiling buffer */ temp = I915_READ(DISP_ARB_CTL); I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); @@ -5736,7 +5736,7 @@ void intel_init_clock_gating(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* Required for FBC */ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; /* Required for CxSR */ @@ -5763,7 +5763,7 @@ void intel_init_clock_gating(struct drm_device *dev) * The bit 5 of 0x42020 * The bit 15 of 0x45000 */ - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); @@ -5939,7 +5939,7 @@ static void intel_init_display(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; else { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a8f408fe4e71..0c6eb97d60fd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -491,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_advance(dev, ring); } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { intel_ring_begin(dev, ring, 2); intel_ring_emit(dev, ring, MI_FLUSH | MI_NO_WRITE_FLUSH | From dc3f82c2e5c3f06e43855f417e4fcfc244383916 Mon Sep 17 00:00:00 2001 From: Chia-I Wu Date: Thu, 21 Oct 2010 19:35:54 +0100 Subject: [PATCH 458/476] drm/i915: Fix current fb blocking for page flip MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Block execbuffer for the fb to be flipped away, not the one that is to be flipped in. 
[ickle: rewritten for -next] Signed-off-by: Chia-I Wu Acked-by: Jesse Barnes Acked-by: Kristian Høgsberg Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e031d82381e5..990f065374b2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4991,9 +4991,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->pending_flip_obj); - - /* Initial scanout buffer will have a 0 pending flip count */ + obj_priv = to_intel_bo(work->old_fb_obj); atomic_clear_mask(1 << intel_crtc->plane, &obj_priv->pending_flip.counter); if (atomic_read(&obj_priv->pending_flip) == 0) @@ -5092,9 +5090,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (ret) goto cleanup_objs; - obj_priv = to_intel_bo(obj); - atomic_add(1 << intel_crtc->plane, &obj_priv->pending_flip); + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, + &to_intel_bo(work->old_fb_obj)->pending_flip); + work->pending_flip_obj = obj; + obj_priv = to_intel_bo(obj); if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; From 3c17fe4b8f40a112a85758a9ab2aebf772bdd647 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20H=C3=A4rdeman?= Date: Fri, 24 Sep 2010 21:44:32 +0200 Subject: [PATCH 459/476] i915: enable AVI infoframe for intel_hdmi.c [v4] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables the sending of AVI infoframes in drivers/gpu/drm/i915/intel_hdmi.c. My receiver currently loses sync when the HDMI output on my computer (DG45FC motherboard) is switched from 800x600 (the BIOS resolution) to 1920x1080 as part of the boot. Fixable by switching inputs on the receiver a couple of times. With this patch, my receiver has not lost sync yet (> 40 tries). Fourth version, now based on drm-intel-next from: git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git Two questions still remain: I'm assuming that the sdvo hardware also stores a header ECC byte in the MSB of the first dword - is this correct? Does the SDVOB and SDVOC handling in intel_hdmi_set_avi_infoframe() look correct? 
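As background for the checksum question above, a minimal worked example of the convention the new intel_dip_infoframe_csum() helper implements (header plus payload must sum to zero modulo 256); the values assume an AVI frame whose payload bytes are still all zero:

#include <assert.h>
#include <stdint.h>

/* type 0x82 + version 0x02 + length 0x0D = 0x91, so the checksum byte
 * must be 0x100 - 0x91 = 0x6F for the total to wrap to zero. */
void example_check_avi_csum(void)
{
        uint8_t type = 0x82, ver = 0x02, len = 0x0D;
        uint8_t csum = 0x100 - (uint8_t)(type + ver + len);

        assert(csum == 0x6f);
        assert((uint8_t)(type + ver + len + csum) == 0);
}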
Signed-off-by: David Härdeman Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 16 ++++ drivers/gpu/drm/i915/intel_drv.h | 33 ++++++++ drivers/gpu/drm/i915/intel_hdmi.c | 60 +++++++++++++- drivers/gpu/drm/i915/intel_sdvo.c | 126 ++++++------------------------ 4 files changed, 130 insertions(+), 105 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c52e209321c1..25ed911a3112 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1353,6 +1353,22 @@ #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) +/* Video Data Island Packet control */ +#define VIDEO_DIP_DATA 0x61178 +#define VIDEO_DIP_CTL 0x61170 +#define VIDEO_DIP_ENABLE (1 << 31) +#define VIDEO_DIP_PORT_B (1 << 29) +#define VIDEO_DIP_PORT_C (2 << 29) +#define VIDEO_DIP_ENABLE_AVI (1 << 21) +#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) +#define VIDEO_DIP_ENABLE_SPD (8 << 21) +#define VIDEO_DIP_SELECT_AVI (0 << 19) +#define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define VIDEO_DIP_SELECT_SPD (3 << 19) +#define VIDEO_DIP_FREQ_ONCE (0 << 16) +#define VIDEO_DIP_FREQ_VSYNC (1 << 16) +#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) + /* Panel power sequencing */ #define PP_STATUS 0x61200 #define PP_ON (1 << 31) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0581e5e5ac55..9af9f86a8765 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,6 +178,38 @@ struct intel_crtc { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define DIP_TYPE_AVI 0x82 +#define DIP_VERSION_AVI 0x2 +#define DIP_LEN_AVI 13 + +struct dip_infoframe { + uint8_t type; /* HB0 */ + uint8_t ver; /* HB1 */ + uint8_t len; /* HB2 - body len, not including checksum */ + uint8_t ecc; /* Header ECC */ + uint8_t checksum; /* PB0 */ + union { + struct { + /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ + uint8_t Y_A_B_S; + /* PB2 - C 7:6, M 5:4, R 3:0 */ + uint8_t C_M_R; + /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ + uint8_t ITC_EC_Q_SC; + /* PB4 - VIC 6:0 */ + uint8_t VIC; + /* PB5 - PR 3:0 */ + uint8_t PR; + /* PB6 to PB13 */ + uint16_t top_bar_end; + uint16_t bottom_bar_start; + uint16_t left_bar_end; + uint16_t right_bar_start; + } avi; + uint8_t payload[27]; + } __attribute__ ((packed)) body; +} __attribute__((packed)); + static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { @@ -200,6 +232,7 @@ extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6c3b2ecd59d5..0d0273e7b029 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -58,6 +58,60 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) struct intel_hdmi, base); } +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) +{ + uint8_t *data = (uint8_t *)avi_if; + uint8_t sum = 0; + unsigned i; + + avi_if->checksum = 0; + avi_if->ecc = 0; + + for (i = 0; i < sizeof(*avi_if); i++) + sum += 
data[i]; + + avi_if->checksum = 0x100 - sum; +} + +static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint32_t *data = (uint32_t *)&avi_if; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 port; + unsigned i; + + if (!intel_hdmi->has_hdmi_sink) + return; + + /* XXX first guess at handling video port, is this corrent? */ + if (intel_hdmi->sdvox_reg == SDVOB) + port = VIDEO_DIP_PORT_B; + else if (intel_hdmi->sdvox_reg == SDVOC) + port = VIDEO_DIP_PORT_C; + else + return; + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); + + intel_dip_infoframe_csum(&avi_if); + for (i = 0; i < sizeof(avi_if); i += 4) { + I915_WRITE(VIDEO_DIP_DATA, *data); + data++; + } + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | + VIDEO_DIP_ENABLE_AVI); +} + static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -79,8 +133,10 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) sdvox |= HDMI_MODE_SELECT; - if (intel_hdmi->has_audio) + if (intel_hdmi->has_audio) { sdvox |= SDVO_AUDIO_ENABLE; + sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; + } if (intel_crtc->pipe == 1) { if (HAS_PCH_CPT(dev)) @@ -91,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, I915_WRITE(intel_hdmi->sdvox_reg, sdvox); POSTING_READ(intel_hdmi->sdvox_reg); + + intel_hdmi_set_avi_infoframe(encoder); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c245383cf7ed..6739a7455174 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -854,115 +854,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) } #endif -static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, - int index, - uint8_t *data, int8_t size, uint8_t tx_rate) -{ - uint8_t set_buf_index[2]; - - set_buf_index[0] = index; - set_buf_index[1] = 0; - - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2)) - return false; - - for (; size > 0; size -= 8) { - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) - return false; - - data += 8; - } - - return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); -} - -static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) -{ - uint8_t csum = 0; - int i; - - for (i = 0; i < size; i++) - csum += data[i]; - - return 0x100 - csum; -} - -#define DIP_TYPE_AVI 0x82 -#define DIP_VERSION_AVI 0x2 -#define DIP_LEN_AVI 13 - -struct dip_infoframe { - uint8_t type; - uint8_t version; - uint8_t len; - uint8_t checksum; - union { - struct { - /* Packet Byte #1 */ - uint8_t S:2; - uint8_t B:2; - uint8_t A:1; - uint8_t Y:2; - uint8_t rsvd1:1; - /* Packet Byte #2 */ - uint8_t R:4; - uint8_t M:2; - uint8_t C:2; - /* Packet Byte #3 */ - uint8_t SC:2; - uint8_t Q:2; - uint8_t EC:3; - uint8_t ITC:1; - /* Packet Byte #4 */ - uint8_t VIC:7; - uint8_t rsvd2:1; - /* Packet Byte #5 */ - uint8_t PR:4; - uint8_t rsvd3:4; - /* Packet Byte #6~13 */ - uint16_t top_bar_end; - uint16_t bottom_bar_start; - uint16_t left_bar_end; 
- uint16_t right_bar_start; - } avi; - struct { - /* Packet Byte #1 */ - uint8_t channel_count:3; - uint8_t rsvd1:1; - uint8_t coding_type:4; - /* Packet Byte #2 */ - uint8_t sample_size:2; /* SS0, SS1 */ - uint8_t sample_frequency:3; - uint8_t rsvd2:3; - /* Packet Byte #3 */ - uint8_t coding_type_private:5; - uint8_t rsvd3:3; - /* Packet Byte #4 */ - uint8_t channel_allocation; - /* Packet Byte #5 */ - uint8_t rsvd4:3; - uint8_t level_shift:4; - uint8_t downmix_inhibit:1; - } audio; - uint8_t payload[28]; - } __attribute__ ((packed)) u; -} __attribute__((packed)); - -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, - struct drm_display_mode * mode) +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) { struct dip_infoframe avi_if = { .type = DIP_TYPE_AVI, - .version = DIP_VERSION_AVI, + .ver = DIP_VERSION_AVI, .len = DIP_LEN_AVI, }; + uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; + uint8_t set_buf_index[2] = { 1, 0 }; + uint64_t *data = (uint64_t *)&avi_if; + unsigned i; - avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, - 4 + avi_if.len); - return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, - 4 + avi_if.len, - SDVO_HBUF_TX_VSYNC); + intel_dip_infoframe_csum(&avi_if); + + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return false; + + for (i = 0; i < sizeof(avi_if); i += 8) { + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, + data, 8)) + return false; + data++; + } + + return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, + &tx_rate, 1); } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) @@ -1116,7 +1034,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, return; if (intel_sdvo->is_hdmi && - !intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) + !intel_sdvo_set_avi_infoframe(intel_sdvo)) return; if (intel_sdvo->is_tv && From e27d8538695d1aee69eb4fdd6f98988e6ffc5c33 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 22 Oct 2010 09:15:22 +0100 Subject: [PATCH 460/476] drm/i915/sdvo: Remove unused encoding member This block is only used when detecting whether the connector is HDMI and never again, so scope the variable to the detection routine. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 45 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6739a7455174..de158b76bcd5 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -120,12 +120,6 @@ struct intel_sdvo { */ struct drm_display_mode *sdvo_lvds_fixed_mode; - /* - * supported encoding mode, used to determine whether HDMI is - * supported - */ - struct intel_sdvo_encode encode; - /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; @@ -799,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, - struct intel_sdvo_encode *encode) +static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { - if (intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_SUPP_ENCODE, - encode, sizeof(*encode))) - return true; + struct intel_sdvo_encode encode; - /* non-support means DVI */ - memset(encode, 0, sizeof(*encode)); - return false; + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPP_ENCODE, + &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, @@ -1958,12 +1948,22 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, } static bool -intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) +intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) { - return intel_sdvo_set_target_output(intel_sdvo, - device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) && - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, - &intel_sdvo->is_hdmi, 1); + int is_hdmi; + + if (!intel_sdvo_check_supp_encode(intel_sdvo)) + return false; + + if (!intel_sdvo_set_target_output(intel_sdvo, + device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1)) + return false; + + is_hdmi = 0; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1)) + return false; + + return !!is_hdmi; } static u8 @@ -2064,14 +2064,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode) - && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device) - && intel_sdvo->is_hdmi) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { /* enable hdmi encoding mode if supported */ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); From 878a3c37d36142a192bdf5b6bfcf920832f431d7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 22 Oct 2010 10:48:12 +0100 Subject: [PATCH 461/476] drm/i915: Fix flushing regression from 9af90d19f Whilst moving the code around in 9af90d19f, I dropped the or'ing in of new write domains which would zero out the write domain for a render target if later reused as a source later in the batch. This meant that we might drop a required flush before reading from the render target. 
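A standalone illustration of the failure mode (made-up domain constant, not kernel code): the same object is relocated twice in one batch, first as a render target and then as a texture source whose write_domain is zero, and only the or-accumulation keeps the earlier pending write, and therefore the flush, alive.

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_DOMAIN_RENDER (1u << 1)   /* stand-in for a GPU write domain */

void example_pending_write_accumulation(void)
{
        uint32_t pending_write_domain = 0;

        /* reloc 1: object written as a render target */
        pending_write_domain |= EXAMPLE_DOMAIN_RENDER;

        /* reloc 2: same object read as a texture, write_domain == 0.
         * Plain assignment ('=') would wipe the pending write here and the
         * flush before the read would be dropped; '|=' preserves it. */
        pending_write_domain |= 0;

        assert(pending_write_domain == EXAMPLE_DOMAIN_RENDER);
}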
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=31043 Reported-by: xunx.fang@intel.com Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c3398d396419..9290f02215cb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3349,7 +3349,7 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, } target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain = reloc.write_domain; + target_obj->pending_write_domain |= reloc.write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. From 85ccc35b7e4a5e7894570fe9b4e4b56d82fc3181 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 22 Oct 2010 14:59:29 +0100 Subject: [PATCH 462/476] agp/intel: Restore valid PTE bit for Sandybridge after bdd3072 In cleaning up the mask functions in bdd3072, the setting of the PTE valid bit was dropped for Sandybridge. Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 0c8ff6d8824b..6b6760ea2435 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1211,13 +1211,13 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, u32 pte_flags; if (type_mask == AGP_USER_UNCACHED_MEMORY) - pte_flags = GEN6_PTE_UNCACHED; + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { - pte_flags = GEN6_PTE_LLC; + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } else { /* set 'normal'/'cached' to LLC by default */ - pte_flags = GEN6_PTE_LLC_MLC; + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } From 297b0c5be3b6e08890cbd7149313408847e81715 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 22 Oct 2010 17:02:41 +0100 Subject: [PATCH 463/476] drm/i915/ringbuffer: Write the value passed in to the tail register This should fix the error along the reset path were we tried to clear the tail register by setting it to 0, but were in fact setting it to the current value and complaining when it did not reset to 0. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 26 ++++++++++++------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +++--- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0c6eb97d60fd..4b53ca81ea4d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -119,12 +119,12 @@ render_ring_flush(struct drm_device *dev, } } -static void ring_set_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value) +static void ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE_TAIL(ring, ring->tail); + I915_WRITE_TAIL(ring, value); } u32 intel_ring_get_active_head(struct drm_device *dev, @@ -148,7 +148,7 @@ static int init_ring_common(struct drm_device *dev, /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); - ring->set_tail(dev, ring, 0); + ring->write_tail(dev, ring, 0); /* Initialize the ring. 
*/ I915_WRITE_START(ring, obj_priv->gtt_offset); @@ -729,7 +729,7 @@ void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->set_tail(dev, ring, ring->tail); + ring->write_tail(dev, ring, ring->tail); } static const struct intel_ring_buffer render_ring = { @@ -738,7 +738,7 @@ static const struct intel_ring_buffer render_ring = { .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_render_ring, - .set_tail = ring_set_tail, + .write_tail = ring_write_tail, .flush = render_ring_flush, .add_request = render_ring_add_request, .get_seqno = render_ring_get_seqno, @@ -755,7 +755,7 @@ static const struct intel_ring_buffer bsd_ring = { .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_bsd_ring, - .set_tail = ring_set_tail, + .write_tail = ring_write_tail, .flush = bsd_ring_flush, .add_request = ring_add_request, .get_seqno = ring_status_page_get_seqno, @@ -765,9 +765,9 @@ static const struct intel_ring_buffer bsd_ring = { }; -static void gen6_bsd_ring_set_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value) +static void gen6_bsd_ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -829,7 +829,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_bsd_ring, - .set_tail = gen6_bsd_ring_set_tail, + .write_tail = gen6_bsd_ring_write_tail, .flush = gen6_ring_flush, .add_request = ring_add_request, .get_seqno = ring_status_page_get_seqno, @@ -859,7 +859,7 @@ static const struct intel_ring_buffer gen6_blt_ring = { .mmio_base = BLT_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_ring_common, - .set_tail = ring_set_tail, + .write_tail = ring_write_tail, .flush = gen6_ring_flush, .add_request = ring_add_request, .get_seqno = ring_status_page_get_seqno, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9e81ff3b39cd..6ab40c6058f7 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -46,9 +46,9 @@ struct intel_ring_buffer { int (*init)(struct drm_device *dev, struct intel_ring_buffer *ring); - void (*set_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value); + void (*write_tail)(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value); void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, From b6651458d33c309767762a6c3da041573413fd88 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 23 Oct 2010 10:15:06 +0100 Subject: [PATCH 464/476] drm/i915: Invalidate the to-ring, flush the old-ring when updating domains When the object has been written to by the gpu it remains on the ring until its flush has been retired. However, when the object is moving to the ring and the associated cache needs to be invalidated, we need to perform the flush on the target ring, not the one it came from (which is NULL in the reported case and so the flush was entirely absent). 
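A compact restatement of the rule the change below encodes (hypothetical helper, assuming only that ring ids are single or-able bits as in enum intel_ring_id): write flushes belong to the ring that last wrote the object, cache invalidations to the ring that is about to consume it, so an object that has never been on any ring (writer id 0) still gets invalidated on the target ring.

unsigned int example_rings_needing_attention(unsigned int writer_ring_id,
                                             unsigned int target_ring_id,
                                             int needs_flush,
                                             int needs_invalidate)
{
        unsigned int rings = 0;

        if (needs_flush)
                rings |= writer_ring_id;   /* flush where it was written */
        if (needs_invalidate)
                rings |= target_ring_id;   /* invalidate where it will run */

        return rings;
}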
Reported-by: Peter Clifton Reported-and-tested-by: Alexey Fisher Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9290f02215cb..e7f27a5b89dc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3078,7 +3078,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * drm_agp_chipset_flush */ static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) +i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, + struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -3132,8 +3133,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; - if (obj_priv->ring) + if (flush_domains & I915_GEM_GPU_DOMAINS) dev_priv->mm.flush_rings |= obj_priv->ring->id; + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + dev_priv->mm.flush_rings |= ring->id; trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -3765,7 +3768,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *obj = object_list[i]; /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj); + i915_gem_object_set_to_gpu_domain(obj, ring); } if (dev->invalidate_domains | dev->flush_domains) { From 641934069d29211baf82afb93622a426172b67b6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 24 Oct 2010 12:38:05 +0100 Subject: [PATCH 465/476] drm/i915: Move gpu_write_list to per-ring ... to prevent flush processing of an idle (or even absent) ring. This fixes a regression during suspend from 87acb0a5. Reported-and-tested-by: Alexey Fisher Tested-by: Peter Clifton Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 9 ------- drivers/gpu/drm/i915/i915_gem.c | 36 +++++++++++++------------ drivers/gpu/drm/i915/intel_ringbuffer.c | 1 + drivers/gpu/drm/i915/intel_ringbuffer.h | 9 +++++++ 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cc9cb0dda6fc..2c2c19b6285e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -570,15 +570,6 @@ typedef struct drm_i915_private { */ struct list_head flushing_list; - /** - * List of objects currently pending a GPU write flush. - * - * All elements on this list will belong to either the - * active_list or flushing_list, last_rendering_seqno can - * be used to differentiate between the two elements. - */ - struct list_head gpu_write_list; - /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e7f27a5b89dc..6c2618d884e7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev, struct drm_i915_gem_object *obj_priv, *next; list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.gpu_write_list, + &ring->gpu_write_list, gpu_write_list) { struct drm_gem_object *obj = &obj_priv->base; - if (obj->write_domain & flush_domains && - obj_priv->ring == ring) { + if (obj->write_domain & flush_domains) { uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; @@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj) static int i915_ring_idle(struct drm_device *dev, struct intel_ring_buffer *ring) { + if (list_empty(&ring->gpu_write_list)) + return 0; + i915_gem_flush_ring(dev, NULL, ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, @@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain = obj->write_domain; - obj->write_domain = obj->pending_write_domain; - if (obj->write_domain) - list_move_tail(&obj_priv->gpu_write_list, - &dev_priv->mm.gpu_write_list); - trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); @@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - obj_priv = to_intel_bo(obj); i915_gem_object_move_to_active(obj, ring); + if (obj->write_domain) + list_move_tail(&to_intel_bo(obj)->gpu_write_list, + &ring->gpu_write_list); } i915_add_request(dev, file, request, ring); @@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev) DRM_ERROR("failed to idle hardware: %d\n", ret); } +static void +init_ring_lists(struct intel_ring_buffer *ring) +{ + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); +} + void i915_gem_load(struct drm_device *dev) { @@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); - INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); - INIT_LIST_HEAD(&dev_priv->render_ring.active_list); - INIT_LIST_HEAD(&dev_priv->render_ring.request_list); - INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); - INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); - INIT_LIST_HEAD(&dev_priv->blt_ring.active_list); - INIT_LIST_HEAD(&dev_priv->blt_ring.request_list); + init_ring_lists(&dev_priv->render_ring); + init_ring_lists(&dev_priv->bsd_ring); + init_ring_lists(&dev_priv->blt_ring); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 4b53ca81ea4d..09f2dc353ae2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -580,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); 
INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(dev, ring); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6ab40c6058f7..a05aff0e5764 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -82,6 +82,15 @@ struct intel_ring_buffer { */ struct list_head request_list; + /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two elements. + */ + struct list_head gpu_write_list; + /** * Do we have some not yet emitted requests outstanding? */ From f30df2fad0c901e74ac9a52a488a54c69a373a41 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 21 Oct 2010 13:55:40 +1000 Subject: [PATCH 466/476] drm/radeon/r600: fix tiling issues in CS checker. The CS checker had some incorrect alignment requirements for 2D surfaces, this made rendering to mipmap levels that were 2D broken. Also the CB height was being worked out from the BO size, this doesn't work at all when rendering mipmap levels, instead we work out what height userspace wanted from slice max and use that to check it fits inside the BO, however the DDX send the wrong slice max for an unaligned buffer so we have to workaround for that even though its a userspace bug. Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600_cs.c | 29 ++++++++++++++++++++--------- drivers/gpu/drm/radeon/radeon_drv.c | 3 ++- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 250a3a918193..f82832780a7e 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -170,6 +170,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) struct r600_cs_track *track = p->track; u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; volatile u32 *ib = p->ib->ptr; + unsigned array_mode; if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); @@ -185,12 +186,12 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* pitch is the number of 8x8 tiles per row */ pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; - height = size / (pitch * 8 * bpe); + slice_tile_max *= 64; + height = slice_tile_max / (pitch * 8); if (height > 8192) height = 8192; - if (height > 7) - height &= ~0x7; - switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { + array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); + switch (array_mode) { case V_0280A0_ARRAY_LINEAR_GENERAL: /* technically height & 0x7 */ break; @@ -222,7 +223,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) break; case V_0280A0_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)); + (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", __func__, __LINE__, pitch); @@ -243,8 +244,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* check offset */ tmp = height * 
pitch * 8 * bpe; if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { - dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]); - return -EINVAL; + if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { + /* the initial DDX does bad things with the CB size occasionally */ + /* it rounds up height too far for slice tile max but the BO is smaller */ + tmp = (height - 7) * 8 * bpe; + if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { + dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); + return -EINVAL; + } + } else { + dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); + return -EINVAL; + } } if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); @@ -361,7 +372,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) break; case V_028010_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", __func__, __LINE__, pitch); @@ -1138,7 +1149,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i break; case V_038000_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", __func__, __LINE__, pitch); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f29a2695d961..f9e228661119 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -47,9 +47,10 @@ * - 2.4.0 - add crtc id query * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) + * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change) */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 6 +#define KMS_DRIVER_MINOR 7 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); From 71901cc4109b3794b863884e348aff3c71e693cc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 21 Oct 2010 13:45:30 -0400 Subject: [PATCH 467/476] drm/radeon/kms/evergreen: add some additional safe regs v2 These are needed for enabling dynamic GPR allocation in the shaders in the userspace acceleration drivers. 
v2: fix typo in reg name Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/reg_srcs/evergreen | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f9e228661119..c20669b9a6a1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -47,7 +47,7 @@ * - 2.4.0 - add crtc id query * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) - * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change) + * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 7 diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index f78fd592544d..ac40fd39d787 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -22,6 +22,10 @@ evergreen 0x9400 0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008BF0 PA_SC_ENHANCE 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ +0x00008D90 SQ_DYN_GPR_OPTIMIZATION +0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN +0x00008D98 SQ_DYN_GPR_THREAD_LIMIT +0x00008D9C SQ_DYN_GPR_LDS_LIMIT 0x00008C00 SQ_CONFIG 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 @@ -34,6 +38,10 @@ evergreen 0x9400 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 0x00008DF8 SQ_CONST_MEM_BASE +0x00008E20 SQ_STATIC_THREAD_MGMT_1 +0x00008E24 SQ_STATIC_THREAD_MGMT_2 +0x00008E28 SQ_STATIC_THREAD_MGMT_3 +0x00008E2C SQ_LDS_RESOURCE_MGMT 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 From f9d9c36204243d81e9d4dd28e58ee335257847d2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 22 Oct 2010 02:51:05 -0400 Subject: [PATCH 468/476] drm/radeon/kms: implement display watermark support for evergreen Improper display watermarks can result in underflow to the display controllers which can cause flickering or other artifacts. This patch implements display watermark support and line buffer allocation for evergreen asics. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 437 +++++++++++++++++++++++++++- drivers/gpu/drm/radeon/evergreend.h | 13 + 2 files changed, 449 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 77ebcbc1b6e3..17b2fe925ce0 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -284,9 +284,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev) } } +/* watermark setup */ + +static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, + struct radeon_crtc *radeon_crtc, + struct drm_display_mode *mode, + struct drm_display_mode *other_mode) +{ + u32 tmp = 0; + /* + * Line Buffer Setup + * There are 3 line buffers, each one shared by 2 display controllers. + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * the display controllers. 
The paritioning is done via one of four + * preset allocations specified in bits 2:0: + * first display controller + * 0 - first half of lb (3840 * 2) + * 1 - first 3/4 of lb (5760 * 2) + * 2 - whole lb (7680 * 2) + * 3 - first 1/4 of lb (1920 * 2) + * second display controller + * 4 - second half of lb (3840 * 2) + * 5 - second 3/4 of lb (5760 * 2) + * 6 - whole lb (7680 * 2) + * 7 - last 1/4 of lb (1920 * 2) + */ + if (mode && other_mode) { + if (mode->hdisplay > other_mode->hdisplay) { + if (mode->hdisplay > 2560) + tmp = 1; /* 3/4 */ + else + tmp = 0; /* 1/2 */ + } else if (other_mode->hdisplay > mode->hdisplay) { + if (other_mode->hdisplay > 2560) + tmp = 3; /* 1/4 */ + else + tmp = 0; /* 1/2 */ + } else + tmp = 0; /* 1/2 */ + } else if (mode) + tmp = 2; /* whole */ + else if (other_mode) + tmp = 3; /* 1/4 */ + + /* second controller of the pair uses second half of the lb */ + if (radeon_crtc->crtc_id % 2) + tmp += 4; + WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); + + switch (tmp) { + case 0: + case 4: + default: + return 3840 * 2; + case 1: + case 5: + return 5760 * 2; + case 2: + case 6: + return 7680 * 2; + case 3: + case 7: + return 1920 * 2; + } +} + +static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) +{ + u32 tmp = RREG32(MC_SHARED_CHMAP); + + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + } +} + +struct evergreen_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. 
*/ + fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, disp_clk); + bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ + u32 dram_bandwidth = evergreen_dram_bandwidth(wm); + u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. + */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm) +{ + /* First calcualte the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. 
*/ + u32 available_bandwidth = evergreen_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm) +{ + if (evergreen_average_bandwidth(wm) <= + (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +}; + +static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm) +{ + if (evergreen_average_bandwidth(wm) <= + (evergreen_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +}; + +static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (evergreen_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +static void evergreen_program_watermarks(struct radeon_device *rdev, + struct radeon_crtc *radeon_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &radeon_crtc->base.mode; + struct evergreen_wm_params wm; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 priority_a_mark = 0, priority_b_mark = 0; + u32 priority_a_cnt = PRIORITY_OFF; + u32 priority_b_cnt = PRIORITY_OFF; + u32 pipe_offset = radeon_crtc->crtc_id * 16; + u32 tmp, arb_control3; + fixed20_12 a, b, c; + + if (radeon_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + priority_a_cnt = 0; + priority_b_cnt = 0; + + wm.yclk = 
rdev->pm.current_mclk * 10; + wm.sclk = rdev->pm.current_sclk * 10; + wm.disp_clk = mode->clock; + wm.src_width = mode->crtc_hdisplay; + wm.active_time = mode->crtc_hdisplay * pixel_period; + wm.blank_time = line_time - wm.active_time; + wm.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm.interlaced = true; + wm.vsc = radeon_crtc->vsc; + wm.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm.vtaps = 2; + wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm.lb_size = lb_size; + wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); + wm.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); + /* set for low clocks */ + /* wm.yclk = low clk; wm.sclk = low clk */ + latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || + !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || + !evergreen_check_latency_hiding(&wm) || + (rdev->disp_priority == 2)) { + DRM_INFO("force priority to high\n"); + priority_a_cnt |= PRIORITY_ALWAYS_ON; + priority_b_cnt |= PRIORITY_ALWAYS_ON; + } + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_a); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, radeon_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_a_mark = dfixed_trunc(c); + priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_b); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, radeon_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_b_mark = dfixed_trunc(c); + priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + } + + /* select wm A */ + arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); + tmp = arb_control3; + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(1); + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); + WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_a) | + LATENCY_HIGH_WATERMARK(line_time))); + /* select wm B */ + tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(2); + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); + WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_b) | + LATENCY_HIGH_WATERMARK(line_time))); + /* restore original selection */ + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3); + + /* write the priority marks */ + WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); + WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); + +} + void evergreen_bandwidth_update(struct radeon_device *rdev) { - /* XXX */ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + u32 num_heads = 0, lb_size; + int i; + + radeon_update_display_priority(rdev); + + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < rdev->num_crtc; i += 2) { + mode0 = &rdev->mode_info.crtcs[i]->base.mode; + mode1 = 
&rdev->mode_info.crtcs[i+1]->base.mode; + lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); + evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); + lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); + evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); + } } static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 319aa9752d40..d507f438eed0 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -412,6 +412,19 @@ #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) +/* display watermarks */ +#define DC_LB_MEMORY_SPLIT 0x6b0c +#define PRIORITY_A_CNT 0x6b18 +#define PRIORITY_MARK_MASK 0x7fff +#define PRIORITY_OFF (1 << 16) +#define PRIORITY_ALWAYS_ON (1 << 20) +#define PRIORITY_B_CNT 0x6b1c +#define PIPE0_ARBITRATION_CONTROL3 0x0bf0 +# define LATENCY_WATERMARK_MASK(x) ((x) << 16) +#define PIPE0_LATENCY_CONTROL 0x0bf4 +# define LATENCY_LOW_WATERMARK(x) ((x) << 0) +# define LATENCY_HIGH_WATERMARK(x) ((x) << 16) + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ From b7d8cce5b558e0c0aa6898c9865356481598b46d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 25 Oct 2010 19:44:00 -0400 Subject: [PATCH 469/476] drm/radeon/kms: MC vram map needs to be >= pci aperture size The vram map in the radeon memory controller needs to be >= the pci aperture size. Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=28402 The problematic cards in the above bug have 64 MB of vram, but the pci aperture is 128 MB and the MC vram map was only 64 MB. This can lead to hangs. Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 6112ac99ccd7..6d1540c0bfed 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2270,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev) /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - * Novell bug 204882 + along with lots of ubuntu ones */ + if (rdev->mc.aper_size > config_aper_size) + config_aper_size = rdev->mc.aper_size; + if (config_aper_size > rdev->mc.real_vram_size) rdev->mc.mc_vram_size = config_aper_size; else From e26fd1199ebab0d469fc3e037da5932d382f0669 Mon Sep 17 00:00:00 2001 From: "Lee, Chun-Yi" Date: Wed, 20 Oct 2010 13:51:19 +0800 Subject: [PATCH 470/476] gpu: Add Intel GMA500(Poulsbo) Stub Driver Currently, there is no GMA500 (Poulsbo) native video driver to support the Intel opregion. So, use this stub driver to enable the ACPI backlight control sysfs entry files by requesting acpi_video_register.
[airlied: fix warnings] Signed-off-by: Lee, Chun-Yi Signed-off-by: Dave Airlie --- drivers/gpu/Makefile | 2 +- drivers/gpu/stub/Kconfig | 13 ++++++++ drivers/gpu/stub/Makefile | 1 + drivers/gpu/stub/poulsbo.c | 64 ++++++++++++++++++++++++++++++++++++++ drivers/video/Kconfig | 2 ++ 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/stub/Kconfig create mode 100644 drivers/gpu/stub/Makefile create mode 100644 drivers/gpu/stub/poulsbo.c diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 30879df3daea..cc9277885dd0 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -1 +1 @@ -obj-y += drm/ vga/ +obj-y += drm/ vga/ stub/ diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig new file mode 100644 index 000000000000..742c423567cf --- /dev/null +++ b/drivers/gpu/stub/Kconfig @@ -0,0 +1,13 @@ +config STUB_POULSBO + tristate "Intel GMA500 Stub Driver" + depends on PCI + # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled + # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select ACPI_VIDEO if ACPI + help + Choose this option if you have a system that has Intel GMA500 + (Poulsbo) integrated graphics. If M is selected, the module will + be called Poulsbo. This driver is a stub driver for Poulsbo that + will call poulsbo.ko to enable the acpi backlight control sysfs + entry file because there have no poulsbo native driver can support + intel opregion. diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile new file mode 100644 index 000000000000..cd940cc9d36d --- /dev/null +++ b/drivers/gpu/stub/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_STUB_POULSBO) += poulsbo.o diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c new file mode 100644 index 000000000000..7edfd27b8dee --- /dev/null +++ b/drivers/gpu/stub/poulsbo.c @@ -0,0 +1,64 @@ +/* + * Intel Poulsbo Stub driver + * + * Copyright (C) 2010 Novell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include + +#define DRIVER_NAME "poulsbo" + +enum { + CHIP_PSB_8108 = 0, + CHIP_PSB_8109 = 1, +}; + +static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { + {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \ + {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \ + {0, 0, 0} +}; + +static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return acpi_video_register(); +} + +static void poulsbo_remove(struct pci_dev *pdev) +{ + acpi_video_unregister(); +} + +static struct pci_driver poulsbo_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = poulsbo_probe, + .remove = poulsbo_remove, +}; + +static int __init poulsbo_init(void) +{ + return pci_register_driver(&poulsbo_driver); +} + +static void __exit poulsbo_exit(void) +{ + pci_unregister_driver(&poulsbo_driver); +} + +module_init(poulsbo_init); +module_exit(poulsbo_exit); + +MODULE_AUTHOR("Lee, Chun-Yi "); +MODULE_DESCRIPTION("Poulsbo Stub Driver"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 8b31fdfefc98..8807ae5f8b21 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -17,6 +17,8 @@ source "drivers/gpu/vga/Kconfig" source "drivers/gpu/drm/Kconfig" +source "drivers/gpu/stub/Kconfig" + config VGASTATE tristate default n From c3cceeddf0b5f97b0d2352b98ef0f025e31a9ae3 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 26 Oct 2010 12:55:52 +1000 Subject: [PATCH 471/476] drm/radeon/kms: don't poll dac load detect. This is slightly destructive, cpu intensive and can cause lockups. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_connectors.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 64c3ddf02167..4dac4b0a02ee 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -663,6 +663,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } } else { + + /* if we aren't forcing don't do destructive polling */ + if (!force) + return connector->status; + if (radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); @@ -850,6 +855,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) goto out; + if (!force) { + ret = connector->status; + goto out; + } + /* find analog encoder */ if (radeon_connector->dac_load_detect) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { From 2281a378e1830d7ab78d3067f228e4e55d368b0d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 21 Oct 2010 13:31:38 -0400 Subject: [PATCH 472/476] drm/radeon/kms/evergreen: set the clear state to the blit state The hw stores a default clear state for registers in the context range that can be initialized when the CP is set up. Set the blit state as the default clear state and use the CLEAR_STATE packet to load the blit state rather than loading it from an IB. This reduces overhead when doing bo moves using the 3D engine. 
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 38 +++++++++--- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 60 +++++++++---------- .../gpu/drm/radeon/evergreen_blit_shaders.c | 19 ++---- drivers/gpu/drm/radeon/evergreend.h | 2 + 4 files changed, 65 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 17b2fe925ce0..f12a5b3ec050 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -32,6 +32,7 @@ #include "atom.h" #include "avivod.h" #include "evergreen_reg.h" +#include "evergreen_blit_shaders.h" #define EVERGREEN_PFP_UCODE_SIZE 1120 #define EVERGREEN_PM4_UCODE_SIZE 1376 @@ -1112,7 +1113,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) static int evergreen_cp_start(struct radeon_device *rdev) { - int r; + int r, i; uint32_t cp_me; r = radeon_ring_lock(rdev, 7); @@ -1132,16 +1133,39 @@ static int evergreen_cp_start(struct radeon_device *rdev) cp_me = 0xff; WREG32(CP_ME_CNTL, cp_me); - r = radeon_ring_lock(rdev, 4); + r = radeon_ring_lock(rdev, evergreen_default_size + 15); if (r) { DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); return r; } - /* init some VGT regs */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(rdev, 0xe); - radeon_ring_write(rdev, 0x10); + + /* setup clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + for (i = 0; i < evergreen_default_size; i++) + radeon_ring_write(rdev, evergreen_default_state[i]); + + radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE); + + /* set clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); + radeon_ring_write(rdev, 0); + + /* SQ_VTX_BASE_VTX_LOC */ + radeon_ring_write(rdev, 0xc0026f00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + + /* Clear consts */ + radeon_ring_write(rdev, 0xc0036f00); + radeon_ring_write(rdev, 0x00000bc4); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_unlock_commit(rdev); return 0; diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index a9825aa324b4..086b9b0416c4 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -230,7 +230,7 @@ draw_auto(struct radeon_device *rdev) } -/* emits 20 */ +/* emits 30 */ static void set_default_state(struct radeon_device *rdev) { @@ -243,8 +243,6 @@ set_default_state(struct radeon_device *rdev) int num_hs_threads, num_ls_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; int num_hs_stack_entries, num_ls_stack_entries; - u64 gpu_addr; - int dwords; switch (rdev->family) { case CHIP_CEDAR: @@ -369,13 +367,9 @@ set_default_state(struct radeon_device *rdev) sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); - /* emit an IB pointing at default state */ - dwords = ALIGN(rdev->r600_blit.state_len, 0x10); - gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; - radeon_ring_write(rdev, 
PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); - radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); - radeon_ring_write(rdev, dwords); + /* set clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); + radeon_ring_write(rdev, 0); /* disable dyn gprs */ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); @@ -396,6 +390,25 @@ set_default_state(struct radeon_device *rdev) radeon_ring_write(rdev, sq_stack_resource_mgmt_1); radeon_ring_write(rdev, sq_stack_resource_mgmt_2); radeon_ring_write(rdev, sq_stack_resource_mgmt_3); + + /* CONTEXT_CONTROL */ + radeon_ring_write(rdev, 0xc0012800); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + + /* SQ_VTX_BASE_VTX_LOC */ + radeon_ring_write(rdev, 0xc0026f00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + + /* SET_SAMPLER */ + radeon_ring_write(rdev, 0xc0036e00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000012); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + } static inline uint32_t i2f(uint32_t input) @@ -426,10 +439,8 @@ static inline uint32_t i2f(uint32_t input) int evergreen_blit_init(struct radeon_device *rdev) { u32 obj_size; - int r, dwords; + int r; void *ptr; - u32 packet2s[16]; - int num_packet2s = 0; /* pin copy shader into vram if already initialized */ if (rdev->r600_blit.shader_obj) @@ -437,17 +448,8 @@ int evergreen_blit_init(struct radeon_device *rdev) mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; - - rdev->r600_blit.state_len = evergreen_default_size; - - dwords = rdev->r600_blit.state_len; - while (dwords & 0xf) { - packet2s[num_packet2s++] = PACKET2(0); - dwords++; - } - - obj_size = dwords * 4; - obj_size = ALIGN(obj_size, 256); + rdev->r600_blit.state_len = 0; + obj_size = 0; rdev->r600_blit.vs_offset = obj_size; obj_size += evergreen_vs_size * 4; @@ -477,12 +479,6 @@ int evergreen_blit_init(struct radeon_device *rdev) return r; } - memcpy_toio(ptr + rdev->r600_blit.state_offset, - evergreen_default_state, rdev->r600_blit.state_len * 4); - - if (num_packet2s) - memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), - packet2s, num_packet2s * 4); memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); radeon_bo_kunmap(rdev->r600_blit.shader_obj); @@ -566,7 +562,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; /* set default + shaders */ - ring_size += 36; /* shaders + def state */ + ring_size += 46; /* shaders + def state */ ring_size += 10; /* fence emit for VB IB */ ring_size += 5; /* done copy */ ring_size += 10; /* fence emit for done copy */ @@ -574,7 +570,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) if (r) return r; - set_default_state(rdev); /* 20 */ + set_default_state(rdev); /* 30 */ set_shaders(rdev); /* 16 */ return 0; } diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c index 5d5045027b46..ef1d28c07fbf 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c @@ -39,10 +39,6 @@ const u32 evergreen_default_state[] = { - 0xc0012800, /* CONTEXT_CONTROL */ - 0x80000000, - 
0x80000000, - 0xc0016900, 0x0000023b, 0x00000000, /* SQ_LDS_ALLOC_PS */ @@ -63,17 +59,11 @@ const u32 evergreen_default_state[] = 0x00000000, 0x00000000, - 0xc0026f00, - 0x00000000, - 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ - 0x00000000, - 0xc0026900, 0x00000010, 0x00000000, /* DB_Z_INFO */ 0x00000000, /* DB_STENCIL_INFO */ - 0xc0016900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ @@ -303,11 +293,10 @@ const u32 evergreen_default_state[] = 0x00000000, /* */ 0x00000000, /* */ - 0xc0036e00, /* SET_SAMPLER */ - 0x00000000, - 0x00000012, - 0x00000000, - 0x00000000, + 0xc0026900, + 0x00000316, + 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ + 0x00000010, /* */ }; const u32 evergreen_vs[] = diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index d507f438eed0..113c70cc8b39 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -658,6 +658,8 @@ #define PACKET3_EVENT_WRITE_EOP 0x47 #define PACKET3_EVENT_WRITE_EOS 0x48 #define PACKET3_PREAMBLE_CNTL 0x4A +# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) +# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) #define PACKET3_RB_OFFSET 0x4B #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D From 354da653233898ed1e51f20cebac9705456bf9b1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 18 Oct 2010 23:45:39 -0400 Subject: [PATCH 473/476] drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker macro tile heights are aligned to num channels, not num banks. Noticed by Dave Airlie. Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600_cs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index f82832780a7e..41802915f93f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -229,7 +229,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) __func__, __LINE__, pitch); return -EINVAL; } - if (!IS_ALIGNED((height / 8), track->nbanks)) { + if (!IS_ALIGNED((height / 8), track->npipes)) { dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", __func__, __LINE__, height); return -EINVAL; @@ -378,7 +378,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) __func__, __LINE__, pitch); return -EINVAL; } - if ((height / 8) & (track->nbanks - 1)) { + if (!IS_ALIGNED((height / 8), track->npipes)) { dev_warn(p->dev, "%s:%d db height (%d) invalid\n", __func__, __LINE__, height); return -EINVAL; From 881fe6c1d06bf49f4ab7aef212cdaf66bd059614 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 18 Oct 2010 23:54:56 -0400 Subject: [PATCH 474/476] drm/radeon/kms: properly compute group_size on 6xx/7xx Needed for tiled surfaces. 
Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600.c | 7 +++++-- drivers/gpu/drm/radeon/rv770.c | 9 +++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 83ba9644dcb9..33952a12f0a3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1608,8 +1608,11 @@ void r600_gpu_init(struct radeon_device *rdev) rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); - tiling_config |= GROUP_SIZE(0); - rdev->config.r600.tiling_group_size = 256; + tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) + rdev->config.r600.tiling_group_size = 512; + else + rdev->config.r600.tiling_group_size = 256; tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; if (tmp > 3) { tiling_config |= ROW_TILING(3); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index ab83f688263a..245374e2b778 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -644,10 +644,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) else gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); - - gb_tiling_config |= GROUP_SIZE(0); - rdev->config.rv770.tiling_group_size = 256; - + gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) + rdev->config.rv770.tiling_group_size = 512; + else + rdev->config.rv770.tiling_group_size = 256; if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { gb_tiling_config |= ROW_TILING(3); gb_tiling_config |= SAMPLE_SPLIT(3); From 8f895da57da80b307efa2f94b5d4caf801e959a5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 26 Oct 2010 20:22:42 -0400 Subject: [PATCH 475/476] drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2 broken by: drm/radeon/r600: fix tiling issues in CS checker. v2: only apply it to 1D tiling case. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600_cs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 41802915f93f..7b294c127c5f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -215,6 +215,9 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) __func__, __LINE__, pitch); return -EINVAL; } + /* avoid breaking userspace */ + if (height > 7) + height &= ~0x7; if (!IS_ALIGNED(height, 8)) { dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", __func__, __LINE__, height); From 135cba0dc399fdd47bd3ae305c1db75fcd77243f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 26 Oct 2010 21:21:47 +0200 Subject: [PATCH 476/476] vmwgfx: Implement a proper GMR eviction mechanism Use Ben's new range manager hooks to implement a manager for GMRs that manages ids rather than ranges. This means we can use the standard TTM code for binding, unbinding and eviction. 
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/Makefile | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 81 ++++++++--- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 16 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 29 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 29 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 5 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 38 ++--- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 137 ++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 75 ---------- 9 files changed, 252 insertions(+), 160 deletions(-) create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 4505e17df3f5..c9281a1b1d3b 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ - vmwgfx_overlay.o vmwgfx_fence.o + vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 1b3bd8c6c67e..80bc37b274e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; +static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | + TTM_PL_FLAG_CACHED; + struct ttm_placement vmw_vram_placement = { .fpfn = 0, .lpfn = 0, @@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = { .busy_placement = &vram_placement_flags }; +static uint32_t vram_gmr_placement_flags[] = { + TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, + VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +}; + +struct ttm_placement vmw_vram_gmr_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 2, + .placement = vram_gmr_placement_flags, + .num_busy_placement = 1, + .busy_placement = &gmr_placement_flags +}; + struct ttm_placement vmw_vram_sys_placement = { .fpfn = 0, .lpfn = 0, @@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = { struct vmw_ttm_backend { struct ttm_backend backend; + struct page **pages; + unsigned long num_pages; + struct vmw_private *dev_priv; + int gmr_id; }; static int vmw_ttm_populate(struct ttm_backend *backend, unsigned long num_pages, struct page **pages, struct page *dummy_read_page) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->pages = pages; + vmw_be->num_pages = num_pages; + return 0; } static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) { - return 0; + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->gmr_id = bo_mem->start; + + return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, + vmw_be->num_pages, vmw_be->gmr_id); } static int vmw_ttm_unbind(struct ttm_backend *backend) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); return 0; } static void vmw_ttm_clear(struct ttm_backend *backend) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->pages = NULL; + vmw_be->num_pages = 0; } static void vmw_ttm_destroy(struct ttm_backend 
*backend) @@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) return NULL; vmw_be->backend.func = &vmw_ttm_func; + vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); return &vmw_be->backend; } @@ -142,7 +185,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, /* System memory */ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; + man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: @@ -150,8 +193,20 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->func = &ttm_bo_manager_func; man->gpu_offset = 0; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_WC; + man->available_caching = TTM_PL_FLAG_CACHED; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case VMW_PL_GMR: + /* + * "Guest Memory Regions" is an aperture like feature with + * one slot per bo. There is an upper limit of the number of + * slots as well as the bo size. + */ + man->func = &vmw_gmrid_manager_func; + man->gpu_offset = 0; + man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_CACHED; + man->default_caching = TTM_PL_FLAG_CACHED; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); @@ -175,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) return 0; } -static void vmw_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) -{ - if (new_mem->mem_type != TTM_PL_SYSTEM) - vmw_dmabuf_gmr_unbind(bo); -} - -static void vmw_swap_notify(struct ttm_buffer_object *bo) -{ - vmw_dmabuf_gmr_unbind(bo); -} - static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; @@ -201,7 +244,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: - /* System memory */ + case VMW_PL_GMR: return 0; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; @@ -277,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = { .sync_obj_flush = vmw_sync_obj_flush, .sync_obj_unref = vmw_sync_obj_unref, .sync_obj_ref = vmw_sync_obj_ref, - .move_notify = vmw_move_notify, - .swap_notify = vmw_swap_notify, + .move_notify = NULL, + .swap_notify = NULL, .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, .io_mem_free = &vmw_ttm_io_mem_free, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f2942b3c59c0..d0ef624fbdcc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) idr_init(&dev_priv->context_idr); idr_init(&dev_priv->surface_idr); idr_init(&dev_priv->stream_idr); - ida_init(&dev_priv->gmr_ida); mutex_init(&dev_priv->init_mutex); init_waitqueue_head(&dev_priv->fence_queue); init_waitqueue_head(&dev_priv->fifo_queue); atomic_set(&dev_priv->fence_queue_waiters, 0); atomic_set(&dev_priv->fifo_queue_waiters, 0); - INIT_LIST_HEAD(&dev_priv->gmr_lru); dev_priv->io_start = pci_resource_start(dev->pdev, 0); dev_priv->vram_start = pci_resource_start(dev->pdev, 1); @@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto 
out_err2; } + dev_priv->has_gmr = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, + dev_priv->max_gmr_ids) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); @@ -440,13 +446,14 @@ out_err4: out_err3: drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); out_err0: - ida_destroy(&dev_priv->gmr_ida); idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); @@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev) iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); - ida_destroy(&dev_priv->gmr_ida); idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 9d55fa8cd0fe..e7a58d055041 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -49,6 +49,9 @@ #define VMWGFX_MAX_GMRS 2048 #define VMWGFX_MAX_DISPLAYS 16 +#define VMW_PL_GMR TTM_PL_PRIV0 +#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 + struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; @@ -57,8 +60,6 @@ struct vmw_fpriv { struct vmw_dma_buffer { struct ttm_buffer_object base; struct list_head validate_list; - struct list_head gmr_lru; - uint32_t gmr_id; bool gmr_bound; uint32_t cur_validate_node; bool on_validate_list; @@ -184,6 +185,7 @@ struct vmw_private { uint32_t capabilities; uint32_t max_gmr_descriptors; uint32_t max_gmr_ids; + bool has_gmr; struct mutex hw_mutex; /* @@ -265,14 +267,6 @@ struct vmw_private { uint32_t val_seq; struct mutex cmdbuf_mutex; - /** - * GMR management. Protected by the lru spinlock. - */ - - struct ida gmr_ida; - struct list_head gmr_lru; - - /** * Operating mode. 
*/ @@ -334,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv); */ extern int vmw_gmr_bind(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo); + struct page *pages[], + unsigned long num_pages, + int gmr_id); extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); /** @@ -383,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, uint32_t id, struct vmw_dma_buffer **out); -extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo); -extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id); -extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id); extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo); extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo); -extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, @@ -442,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_ne_placement; extern struct ttm_placement vmw_vram_sys_placement; +extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_sys_placement; extern struct ttm_bo_driver vmw_bo_driver; extern int vmw_dma_quiescent(struct drm_device *dev); @@ -543,6 +536,12 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); int vmw_overlay_num_overlays(struct vmw_private *dev_priv); int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); +/** + * GMR Id manager + */ + +extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; + /** * Inline helper functions */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 8e396850513c..51d9f9f1d7f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) reloc = &sw_context->relocs[i]; validate = &sw_context->val_bufs[reloc->index]; bo = validate->bo; - reloc->location->offset += bo->offset; - reloc->location->gmrId = vmw_dmabuf_gmr(bo); + if (bo->mem.mem_type == TTM_PL_VRAM) { + reloc->location->offset += bo->offset; + reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; + } else + reloc->location->gmrId = bo->mem.start; } vmw_free_relocations(sw_context); } @@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, { int ret; - if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) - return 0; - /** - * Put BO in VRAM, only if there is space. + * Put BO in VRAM if there is space, otherwise as a GMR. + * If there is no space in VRAM and GMR ids are all used up, + * start evicting GMRs to make room. If the DMA buffer can't be + * used as a GMR, this will return -ENOMEM. */ - ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false); - if (unlikely(ret == -ERESTARTSYS)) - return ret; - - /** - * Otherwise, set it up as GMR. 
- */ - - if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) - return 0; - - ret = vmw_gmr_bind(dev_priv, bo); + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); if (likely(ret == 0 || ret == -ERESTARTSYS)) return ret; @@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * previous contents. */ + DRM_INFO("Falling through to VRAM.\n"); ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index e7304188a784..41d9a5b73c03 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -612,7 +612,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, { struct ttm_buffer_object *bo = &vmw_bo->base; struct ttm_placement ne_placement = vmw_vram_ne_placement; - struct drm_mm_node *mm_node; int ret = 0; ne_placement.lpfn = bo->num_pages; @@ -626,9 +625,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, if (unlikely(ret != 0)) goto err_unlock; - mm_node = bo->mem.mm_node; if (bo->mem.mem_type == TTM_PL_VRAM && - mm_node->start < bo->num_pages) + bo->mem.start < bo->num_pages && + bo->mem.start > 0) (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 5f8908a5d7fd..de0c5948521d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, */ static unsigned long vmw_gmr_count_descriptors(struct page *pages[], - unsigned long num_pages) + unsigned long num_pages) { unsigned long prev_pfn = ~(0UL); unsigned long pfn; @@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[], } int vmw_gmr_bind(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo) + struct page *pages[], + unsigned long num_pages, + int gmr_id) { - struct ttm_tt *ttm = bo->ttm; - unsigned long descriptors; - int ret; - uint32_t id; struct list_head desc_pages; + int ret; - if (!(dev_priv->capabilities & SVGA_CAP_GMR)) + if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) return -EINVAL; - ret = ttm_tt_populate(ttm); - if (unlikely(ret != 0)) - return ret; - - descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages); - if (unlikely(descriptors > dev_priv->max_gmr_descriptors)) + if (vmw_gmr_count_descriptors(pages, num_pages) > + dev_priv->max_gmr_descriptors) return -EINVAL; INIT_LIST_HEAD(&desc_pages); - ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages, - ttm->num_pages); + + ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages); if (unlikely(ret != 0)) return ret; - ret = vmw_gmr_id_alloc(dev_priv, &id); - if (unlikely(ret != 0)) - goto out_no_id; - - vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages); + vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages); vmw_gmr_free_descriptors(&desc_pages); - vmw_dmabuf_set_gmr(bo, id); + return 0; - -out_no_id: - vmw_gmr_free_descriptors(&desc_pages); - return ret; } + void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) { mutex_lock(&dev_priv->hw_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c new file mode 100644 index 000000000000..ac6e0d1bd629 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -0,0 +1,137 @@ +/************************************************************************** + * + 
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "vmwgfx_drv.h" +#include "ttm/ttm_module.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement.h" +#include +#include +#include + +struct vmwgfx_gmrid_man { + spinlock_t lock; + struct ida gmr_ida; + uint32_t max_gmr_ids; +}; + +static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct vmwgfx_gmrid_man *gman = + (struct vmwgfx_gmrid_man *)man->priv; + int ret; + int id; + + mem->mm_node = NULL; + + do { + if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) + return -ENOMEM; + + spin_lock(&gman->lock); + ret = ida_get_new(&gman->gmr_ida, &id); + + if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { + ida_remove(&gman->gmr_ida, id); + spin_unlock(&gman->lock); + return 0; + } + + spin_unlock(&gman->lock); + + } while (ret == -EAGAIN); + + if (likely(ret == 0)) { + mem->mm_node = gman; + mem->start = id; + } + + return ret; +} + +static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct vmwgfx_gmrid_man *gman = + (struct vmwgfx_gmrid_man *)man->priv; + + if (mem->mm_node) { + spin_lock(&gman->lock); + ida_remove(&gman->gmr_ida, mem->start); + spin_unlock(&gman->lock); + mem->mm_node = NULL; + } +} + +static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + struct vmwgfx_gmrid_man *gman = + kzalloc(sizeof(*gman), GFP_KERNEL); + + if (unlikely(gman == NULL)) + return -ENOMEM; + + spin_lock_init(&gman->lock); + ida_init(&gman->gmr_ida); + gman->max_gmr_ids = p_size; + man->priv = (void *) gman; + return 0; +} + +static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man) +{ + struct vmwgfx_gmrid_man *gman = + (struct vmwgfx_gmrid_man *)man->priv; + + if (gman) { + ida_destroy(&gman->gmr_ida); + kfree(gman); + } + return 0; +} + +static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, + const char *prefix) +{ + printk(KERN_INFO "%s: No debug info available for the GMR " + "id manager.\n", prefix); +} + +const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { + vmw_gmrid_man_init, + vmw_gmrid_man_takedown, + 
vmw_gmrid_man_get_node, + vmw_gmrid_man_put_node, + vmw_gmrid_man_debug +}; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c8c40e9979db..36e129f0023f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, return bo_user_size + page_array_size; } -void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); - struct ttm_bo_global *glob = bo->glob; - struct vmw_private *dev_priv = - container_of(bo->bdev, struct vmw_private, bdev); - - if (vmw_bo->gmr_bound) { - vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); - spin_lock(&glob->lru_lock); - ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); - spin_unlock(&glob->lru_lock); - vmw_bo->gmr_bound = false; - } -} - void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) { struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); struct ttm_bo_global *glob = bo->glob; - vmw_dmabuf_gmr_unbind(bo); ttm_mem_global_free(glob->mem_glob, bo->acc_size); kfree(vmw_bo); } @@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, memset(vmw_bo, 0, sizeof(*vmw_bo)); - INIT_LIST_HEAD(&vmw_bo->gmr_lru); INIT_LIST_HEAD(&vmw_bo->validate_list); - vmw_bo->gmr_id = 0; - vmw_bo->gmr_bound = false; ret = ttm_bo_init(bdev, &vmw_bo->base, size, ttm_bo_type_device, placement, @@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); struct ttm_bo_global *glob = bo->glob; - vmw_dmabuf_gmr_unbind(bo); ttm_mem_global_free(glob->mem_glob, bo->acc_size); kfree(vmw_user_bo); } @@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) vmw_bo->on_validate_list = false; } -uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo; - - if (bo->mem.mem_type == TTM_PL_VRAM) - return SVGA_GMR_FRAMEBUFFER; - - vmw_bo = vmw_dma_buffer(bo); - - return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL; -} - -void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); - vmw_bo->gmr_bound = true; - vmw_bo->gmr_id = id; -} - int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, uint32_t handle, struct vmw_dma_buffer **out) { @@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, return 0; } -/** - * TODO: Implement a gmr id eviction mechanism. Currently we just fail - * when we're out of ids, causing GMR space to be allocated - * out of VRAM. - */ - -int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) -{ - struct ttm_bo_global *glob = dev_priv->bdev.glob; - int id; - int ret; - - do { - if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0)) - return -ENOMEM; - - spin_lock(&glob->lru_lock); - ret = ida_get_new(&dev_priv->gmr_ida, &id); - spin_unlock(&glob->lru_lock); - } while (ret == -EAGAIN); - - if (unlikely(ret != 0)) - return ret; - - if (unlikely(id >= dev_priv->max_gmr_ids)) { - spin_lock(&glob->lru_lock); - ida_remove(&dev_priv->gmr_ida, id); - spin_unlock(&glob->lru_lock); - return -EBUSY; - } - - *p_id = (uint32_t) id; - return 0; -} - /* * Stream management */
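
The header hunk above only declares vmw_vram_gmr_placement; its definition sits with the other placements on the driver side. As a rough sketch of what such a two-level placement looks like (the field layout follows struct ttm_placement of this kernel generation; the exact flag mix and busy-placement choice are illustrative, not quoted from the patch):

#include "vmwgfx_drv.h"		/* VMW_PL_FLAG_GMR */
#include "ttm/ttm_placement.h"	/* TTM_PL_FLAG_* */

/*
 * Prefer VRAM, fall back to a GMR id. Under memory pressure TTM walks
 * the busy_placement list instead, so eviction can be steered between
 * the two pools.
 */
static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 2,
	.busy_placement = vram_gmr_placement_flags
};

With a placement of this shape, the ttm_bo_validate() call in vmw_validate_single_buffer() lets TTM pick VRAM when it has room and a GMR id otherwise, matching the fallback the comment in that hunk describes.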
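
The new id manager only takes effect once VMW_PL_GMR is declared to TTM as a memory type whose manager is vmw_gmrid_manager_func (the ttm_bo_init_mm() call in vmw_driver_load() above sizes it with max_gmr_ids). A hedged sketch of that hookup through the ttm_bo_driver::init_mem_type callback follows; the function name and flag values here are assumptions based on the TTM API of this period, not lifted from the patch:

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory: cached pages, nothing to manage. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * GMRs carry no GPU offset space; the gmrid manager hands
		 * out an id in mem->start, which vmw_apply_relocations()
		 * then writes into the command stream as the gmrId.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

Because vmw_gmrid_man_get_node() may legitimately return 0 with mem->mm_node left NULL when all ids are taken, TTM treats the GMR pool as full and falls back to evicting or to the VRAM half of the placement, which is the eviction behaviour the removed vmw_gmr_id_alloc() TODO asked for.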