drm/i915: Finish enabling rps before use by sysfs or debugfs

Enabling rps (turbo setup) was put in a work queue because it may
take quite a while.  This change flushes that delayed work to ensure
rps values are initialized before they are used by sysfs or debugfs.
Specifically, rps.delayed_resume_work is flushed before using
rps.hw_max, rps.max_delay, rps.min_delay, or rps.cur_delay.

This change fixes a problem in sysfs where show functions report
incorrect values because they read uninitialized rps fields, and
store functions reject valid input because their range checks use
uninitialized limits.  It also addresses similar
use-before-initialization problems in debugfs.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
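
[Note: the following is an illustrative sketch, not part of the commit.
It shows the general delayed-work pattern the fix relies on: setup is
deferred to a delayed work item, so a reader must flush_delayed_work()
before trusting the values that work initializes.  All names here
(demo_state, demo_init_work_fn, demo_value_in_range) are hypothetical.]

/* Minimal kernel-module sketch of flush-before-read with delayed work. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

struct demo_state {
	struct delayed_work init_work;	/* deferred, possibly slow, setup */
	struct mutex lock;
	int min_val;			/* valid only after init_work has run */
	int max_val;
};

static struct demo_state demo;

static void demo_init_work_fn(struct work_struct *work)
{
	struct demo_state *s = container_of(to_delayed_work(work),
					    struct demo_state, init_work);

	msleep(100);			/* stand-in for slow hardware setup */

	mutex_lock(&s->lock);
	s->min_val = 1;
	s->max_val = 10;
	mutex_unlock(&s->lock);
}

/* A reader in the style of a sysfs store range check. */
static bool demo_value_in_range(int val)
{
	bool ok;

	/* Without this flush, min_val/max_val may still be zero. */
	flush_delayed_work(&demo.init_work);

	mutex_lock(&demo.lock);
	ok = val >= demo.min_val && val <= demo.max_val;
	mutex_unlock(&demo.lock);

	return ok;
}

static int __init demo_init(void)
{
	mutex_init(&demo.lock);
	INIT_DELAYED_WORK(&demo.init_work, demo_init_work_fn);
	schedule_delayed_work(&demo.init_work, msecs_to_jiffies(50));

	pr_info("demo: 5 in range: %d\n", demo_value_in_range(5));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.init_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");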
Author: Tom O'Rourke <Tom.O'Rourke@intel.com>, 2013-09-16 14:56:43 -07:00
Committed by: Daniel Vetter
Commit: 5c9669cee5 (parent 09e14bf3ba)
2 changed files with 22 additions and 0 deletions

drivers/gpu/drm/i915/i915_debugfs.c

@@ -847,6 +847,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1325,6 +1327,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1940,6 +1944,8 @@ i915_max_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1964,6 +1970,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2002,6 +2010,8 @@ i915_min_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2026,6 +2036,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);

drivers/gpu/drm/i915/i915_sysfs.c

@@ -251,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -283,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -307,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -355,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -379,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {