/* 2012-04-16 19:20:34 -06:00 */
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
# include "i915_drv.h"
# include "intel_drv.h"
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
void i8xx_disable_fbc ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
u32 fbc_ctl ;
/* Disable compression */
fbc_ctl = I915_READ ( FBC_CONTROL ) ;
if ( ( fbc_ctl & FBC_CTL_EN ) = = 0 )
return ;
fbc_ctl & = ~ FBC_CTL_EN ;
I915_WRITE ( FBC_CONTROL , fbc_ctl ) ;
/* Wait for compressing bit to clear */
if ( wait_for ( ( I915_READ ( FBC_STATUS ) & FBC_STAT_COMPRESSING ) = = 0 , 10 ) ) {
DRM_DEBUG_KMS ( " FBC idle timed out \n " ) ;
return ;
}
DRM_DEBUG_KMS ( " disabled FBC \n " ) ;
}
/*
 * i8xx_enable_fbc - program and enable FBC on 8xx/9xx-class hardware
 * @crtc: crtc whose framebuffer will be compressed
 * @interval: periodic recompression interval written to FBC_CONTROL
 *
 * Programs the compressed framebuffer pitch, plane selection, fence and
 * offset registers, then sets FBC_CTL_EN.  Register write order follows
 * the hardware programming sequence and must not be rearranged.
 */
void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* The CFB pitch is bounded both by the compressed buffer size and
	 * by the framebuffer's own pitch. */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
bool i8xx_fbc_enabled ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
return I915_READ ( FBC_CONTROL ) & FBC_CTL_EN ;
}
/*
 * g4x_enable_fbc - program and enable DPFC-style FBC on G4x-class hardware
 * @crtc: crtc whose framebuffer will be compressed
 * @interval: recompression timer count for DPFC_RECOMP_CTL
 */
void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	/* A CPU fence is attached so CPU writes to the scanout trigger
	 * recompression. */
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
u32 dpfc_ctl ;
/* Disable compression */
dpfc_ctl = I915_READ ( DPFC_CONTROL ) ;
if ( dpfc_ctl & DPFC_CTL_EN ) {
dpfc_ctl & = ~ DPFC_CTL_EN ;
I915_WRITE ( DPFC_CONTROL , dpfc_ctl ) ;
DRM_DEBUG_KMS ( " disabled FBC \n " ) ;
}
}
bool g4x_fbc_enabled ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
return I915_READ ( DPFC_CONTROL ) & DPFC_CTL_EN ;
}
/*
 * sandybridge_blit_fbc_update - make the blitter notify FBC of its writes.
 *
 * Toggles GEN6_BLITTER_FBC_NOTIFY in the blitter ECOSKPD register under
 * forcewake.  NOTE(review): the first shifted write appears to use the
 * upper (lock/mask) half of the register to unmask the bit before setting
 * it, and the final shifted clear re-masks it — confirm against Bspec.
 * The write ordering must be preserved.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* Posting read flushes the writes before forcewake is dropped. */
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
/*
 * ironlake_enable_fbc - program and enable FBC on Ironlake/Sandybridge
 * @crtc: crtc whose framebuffer will be compressed
 * @interval: recompression timer count for ILK_DPFC_RECOMP_CTL
 *
 * On gen6 an additional CPU fence register and a blitter notification
 * setup are required on top of the ILK programming sequence.
 */
void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits of the current control value. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* Sandybridge also needs the CPU fence programmed and the
		 * blitter told to notify FBC of its writes. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void ironlake_disable_fbc ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
u32 dpfc_ctl ;
/* Disable compression */
dpfc_ctl = I915_READ ( ILK_DPFC_CONTROL ) ;
if ( dpfc_ctl & DPFC_CTL_EN ) {
dpfc_ctl & = ~ DPFC_CTL_EN ;
I915_WRITE ( ILK_DPFC_CONTROL , dpfc_ctl ) ;
DRM_DEBUG_KMS ( " disabled FBC \n " ) ;
}
}
bool ironlake_fbc_enabled ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
return I915_READ ( ILK_DPFC_CONTROL ) & DPFC_CTL_EN ;
}
bool intel_fbc_enabled ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
if ( ! dev_priv - > display . fbc_enabled )
return false ;
return dev_priv - > display . fbc_enabled ( dev ) ;
}
/*
 * intel_fbc_work_fn - deferred FBC enable.
 *
 * Runs from the delayed work scheduled by intel_enable_fbc().  Performs
 * the actual hardware enable once pageflipping has had time to settle,
 * but only if this item is still the pending one and the crtc still
 * scans out the framebuffer recorded at schedule time.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	/* A newer request or a cancellation replaces dev_priv->fbc_work;
	 * in that case this stale item just frees itself below. */
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record the parameters FBC was enabled with so
			 * intel_update_fbc() can detect no-op updates. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
/*
 * intel_cancel_fbc_work - withdraw any pending deferred FBC enable.
 * @dev_priv: device private with the possibly-pending work item
 *
 * Caller context: relies on struct_mutex for synchronisation with the
 * work callback (see comment below).
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
void intel_enable_fbc ( struct drm_crtc * crtc , unsigned long interval )
{
struct intel_fbc_work * work ;
struct drm_device * dev = crtc - > dev ;
struct drm_i915_private * dev_priv = dev - > dev_private ;
if ( ! dev_priv - > display . enable_fbc )
return ;
intel_cancel_fbc_work ( dev_priv ) ;
work = kzalloc ( sizeof * work , GFP_KERNEL ) ;
if ( work = = NULL ) {
dev_priv - > display . enable_fbc ( crtc , interval ) ;
return ;
}
work - > crtc = crtc ;
work - > fb = crtc - > fb ;
work - > interval = interval ;
INIT_DELAYED_WORK ( & work - > work , intel_fbc_work_fn ) ;
dev_priv - > fbc_work = work ;
DRM_DEBUG_KMS ( " scheduling delayed FBC enable \n " ) ;
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression . Note that
* this delay also serves a second purpose : it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers .
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page - flipping sequence
* and indeed performing the enable as a co - routine and not
* waiting synchronously upon the vblank .
*/
schedule_delayed_work ( & work - > work , msecs_to_jiffies ( 50 ) ) ;
}
void intel_disable_fbc ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
intel_cancel_fbc_work ( dev_priv ) ;
if ( ! dev_priv - > display . disable_fbc )
return ;
dev_priv - > display . disable_fbc ( dev ) ;
dev_priv - > cfb_plane = - 1 ;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* A negative module parameter means "per-chip default": off for
	 * everything up to and including gen6. */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes.  For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers.  We also
		 * have to wait for the next vblank for that to take
		 * effect.  However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC.  However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point.  And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
/* 2012-04-16 19:20:35 -06:00 */
/* CxSR latency lookup table, keyed on (is_desktop, is_ddr3, fsb_freq,
 * mem_freq) — see intel_get_cxsr_latency().  The remaining four columns
 * are the latencies consumed by pineview_update_wm(): display_sr,
 * cursor_sr, display_hpll_disable, cursor_hpll_disable.
 * NOTE(review): frequencies are presumably MHz and latencies ns — confirm
 * against the platform documentation. */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
const struct cxsr_latency * intel_get_cxsr_latency ( int is_desktop ,
int is_ddr3 ,
int fsb ,
int mem )
{
const struct cxsr_latency * latency ;
int i ;
if ( fsb = = 0 | | mem = = 0 )
return NULL ;
for ( i = 0 ; i < ARRAY_SIZE ( cxsr_latency_table ) ; i + + ) {
latency = & cxsr_latency_table [ i ] ;
if ( is_desktop = = latency - > is_desktop & &
is_ddr3 = = latency - > is_ddr3 & &
fsb = = latency - > fsb_freq & & mem = = latency - > mem_freq )
return latency ;
}
DRM_DEBUG_KMS ( " Unknown FSB/MEM found, disable CxSR \n " ) ;
return NULL ;
}
void pineview_disable_cxsr ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
/* deactivate cxsr */
I915_WRITE ( DSPFW3 , I915_READ ( DSPFW3 ) & ~ PINEVIEW_SELF_REFRESH_EN ) ;
}
/*
* Latency for FIFO fetches is dependent on several factors :
* - memory configuration ( speed , channels )
* - chipset
* - current MCH state
* It can be fairly high in some situations , so here we assume a fairly
* pessimal value . It ' s a tradeoff between extra memory fetches ( if we
* set this value too high , the FIFO will fetch frequently to stay full )
* and power consumption ( set it too low to save power and we might see
* FIFO underruns and display " flicker " ) .
*
* A value of 5u s seems to be a good balance ; safe for very low end
* platforms but not overly aggressive on lower latency configs .
*/
static const int latency_ns = 5000 ;
int i9xx_get_fifo_size ( struct drm_device * dev , int plane )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
uint32_t dsparb = I915_READ ( DSPARB ) ;
int size ;
size = dsparb & 0x7f ;
if ( plane )
size = ( ( dsparb > > DSPARB_CSTART_SHIFT ) & 0x7f ) - size ;
DRM_DEBUG_KMS ( " FIFO size - (0x%08x) %s: %d \n " , dsparb ,
plane ? " B " : " A " , size ) ;
return size ;
}
int i85x_get_fifo_size ( struct drm_device * dev , int plane )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
uint32_t dsparb = I915_READ ( DSPARB ) ;
int size ;
size = dsparb & 0x1ff ;
if ( plane )
size = ( ( dsparb > > DSPARB_BEND_SHIFT ) & 0x1ff ) - size ;
size > > = 1 ; /* Convert to cachelines */
DRM_DEBUG_KMS ( " FIFO size - (0x%08x) %s: %d \n " , dsparb ,
plane ? " B " : " A " , size ) ;
return size ;
}
int i845_get_fifo_size ( struct drm_device * dev , int plane )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
uint32_t dsparb = I915_READ ( DSPARB ) ;
int size ;
size = dsparb & 0x7f ;
size > > = 2 ; /* Convert to cachelines */
DRM_DEBUG_KMS ( " FIFO size - (0x%08x) %s: %d \n " , dsparb ,
plane ? " B " : " A " ,
size ) ;
return size ;
}
int i830_get_fifo_size ( struct drm_device * dev , int plane )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
uint32_t dsparb = I915_READ ( DSPARB ) ;
int size ;
size = dsparb & 0x7f ;
size > > = 1 ; /* Convert to cachelines */
DRM_DEBUG_KMS ( " FIFO size - (0x%08x) %s: %d \n " , dsparb ,
plane ? " B " : " A " , size ) ;
return size ;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO ,
PINEVIEW_MAX_WM ,
PINEVIEW_DFT_WM ,
PINEVIEW_GUARD_WM ,
PINEVIEW_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO ,
PINEVIEW_MAX_WM ,
PINEVIEW_DFT_HPLLOFF_WM ,
PINEVIEW_GUARD_WM ,
PINEVIEW_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO ,
PINEVIEW_CURSOR_MAX_WM ,
PINEVIEW_CURSOR_DFT_WM ,
PINEVIEW_CURSOR_GUARD_WM ,
PINEVIEW_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO ,
PINEVIEW_CURSOR_MAX_WM ,
PINEVIEW_CURSOR_DFT_WM ,
PINEVIEW_CURSOR_GUARD_WM ,
PINEVIEW_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE ,
G4X_MAX_WM ,
G4X_MAX_WM ,
2 ,
G4X_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO ,
I965_CURSOR_MAX_WM ,
I965_CURSOR_DFT_WM ,
2 ,
G4X_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params valleyview_wm_info = {
VALLEYVIEW_FIFO_SIZE ,
VALLEYVIEW_MAX_WM ,
VALLEYVIEW_MAX_WM ,
2 ,
G4X_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params valleyview_cursor_wm_info = {
I965_CURSOR_FIFO ,
VALLEYVIEW_CURSOR_MAX_WM ,
I965_CURSOR_DFT_WM ,
2 ,
G4X_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO ,
I965_CURSOR_MAX_WM ,
I965_CURSOR_DFT_WM ,
2 ,
I915_FIFO_LINE_SIZE ,
} ;
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE ,
I915_MAX_WM ,
1 ,
2 ,
I915_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE ,
I915_MAX_WM ,
1 ,
2 ,
I915_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE ,
I915_MAX_WM ,
1 ,
2 ,
I830_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE ,
I915_MAX_WM ,
1 ,
2 ,
I830_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO ,
ILK_DISPLAY_MAXWM ,
ILK_DISPLAY_DFTWM ,
2 ,
ILK_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO ,
ILK_CURSOR_MAXWM ,
ILK_CURSOR_DFTWM ,
2 ,
ILK_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO ,
ILK_DISPLAY_MAX_SRWM ,
ILK_DISPLAY_DFT_SRWM ,
2 ,
ILK_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO ,
ILK_CURSOR_MAX_SRWM ,
ILK_CURSOR_DFT_SRWM ,
2 ,
ILK_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO ,
SNB_DISPLAY_MAXWM ,
SNB_DISPLAY_DFTWM ,
2 ,
SNB_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO ,
SNB_CURSOR_MAXWM ,
SNB_CURSOR_DFTWM ,
2 ,
SNB_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO ,
SNB_DISPLAY_MAX_SRWM ,
SNB_DISPLAY_DFT_SRWM ,
2 ,
SNB_FIFO_LINE_SIZE
} ;
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO ,
SNB_CURSOR_MAX_SRWM ,
SNB_CURSOR_DFT_SRWM ,
2 ,
SNB_FIFO_LINE_SIZE
} ;
/**
* intel_calculate_wm - calculate watermark level
* @ clock_in_khz : pixel clock
* @ wm : chip FIFO params
* @ pixel_size : display pixel size
* @ latency_ns : memory latency for the platform
*
* Calculate the watermark level ( the level at which the display plane will
* start fetching from memory again ) . Each chip has a different display
* FIFO size and allocation , so the caller needs to figure that out and pass
* in the correct intel_watermark_params structure .
*
* As the pixel clock runs , the FIFO will be drained at a rate that depends
* on the pixel size . When it reaches the watermark level , it ' ll start
* fetching FIFO line sized based chunks from memory until the FIFO fills
* past the watermark point . If the FIFO drains completely , a FIFO underrun
* will occur , and a display engine hang could result .
*/
static unsigned long intel_calculate_wm ( unsigned long clock_in_khz ,
const struct intel_watermark_params * wm ,
int fifo_size ,
int pixel_size ,
unsigned long latency_ns )
{
long entries_required , wm_size ;
/*
* Note : we need to make sure we don ' t overflow for various clock &
* latency values .
* clocks go from a few thousand to several hundred thousand .
* latency is usually a few thousand
*/
entries_required = ( ( clock_in_khz / 1000 ) * pixel_size * latency_ns ) /
1000 ;
entries_required = DIV_ROUND_UP ( entries_required , wm - > cacheline_size ) ;
DRM_DEBUG_KMS ( " FIFO entries required for mode: %ld \n " , entries_required ) ;
wm_size = fifo_size - ( entries_required + wm - > guard_size ) ;
DRM_DEBUG_KMS ( " FIFO watermark level: %ld \n " , wm_size ) ;
/* Don't promote wm_size to unsigned... */
if ( wm_size > ( long ) wm - > max_wm )
wm_size = wm - > max_wm ;
if ( wm_size < = 0 )
wm_size = wm - > default_wm ;
return wm_size ;
}
static struct drm_crtc * single_enabled_crtc ( struct drm_device * dev )
{
struct drm_crtc * crtc , * enabled = NULL ;
list_for_each_entry ( crtc , & dev - > mode_config . crtc_list , head ) {
if ( crtc - > enabled & & crtc - > fb ) {
if ( enabled )
return NULL ;
enabled = crtc ;
}
}
return enabled ;
}
/*
 * pineview_update_wm - program Pineview self-refresh watermarks
 * @dev: the drm_device
 *
 * Looks up the CxSR latencies for the current memory configuration and,
 * when exactly one crtc is active, programs the display/cursor SR and
 * HPLL-off watermarks and enables self-refresh.  Otherwise CxSR is
 * disabled.
 */
void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): this passes the *display* fifo_size with the
		 * cursor params — confirm whether that is intentional. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
/*
 * g4x_compute_wm0 - compute level-0 (normal operation) watermarks
 * @dev: the drm_device
 * @plane: plane whose crtc supplies the mode
 * @display: display plane FIFO params
 * @display_latency_ns: memory latency to assume for the plane
 * @cursor: cursor FIFO params
 * @cursor_latency_ns: memory latency to assume for the cursor
 * @plane_wm: out — computed plane watermark
 * @cursor_wm: out — computed cursor watermark
 *
 * Returns false (with guard-size fallbacks stored) when the plane is
 * inactive, true once both watermarks have been computed.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive plane: fall back to the guard sizes. */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Account for a potential TLB miss eating into the FIFO headroom. */
	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
* Check the wm result .
*
* If any calculated watermark values is larger than the maximum value that
* can be programmed into the associated watermark register , that watermark
* must be disabled .
*/
static bool g4x_check_srwm ( struct drm_device * dev ,
int display_wm , int cursor_wm ,
const struct intel_watermark_params * display ,
const struct intel_watermark_params * cursor )
{
DRM_DEBUG_KMS ( " SR watermark: display plane %d, cursor %d \n " ,
display_wm , cursor_wm ) ;
if ( display_wm > display - > max_wm ) {
DRM_DEBUG_KMS ( " display watermark is too large(%d/%ld), disabling \n " ,
display_wm , display - > max_wm ) ;
return false ;
}
if ( cursor_wm > cursor - > max_wm ) {
DRM_DEBUG_KMS ( " cursor watermark is too large(%d/%ld), disabling \n " ,
cursor_wm , cursor - > max_wm ) ;
return false ;
}
if ( ! ( display_wm | | cursor_wm ) ) {
DRM_DEBUG_KMS ( " SR latency is 0, disabling \n " ) ;
return false ;
}
return true ;
}
static bool g4x_compute_srwm ( struct drm_device * dev ,
int plane ,
int latency_ns ,
const struct intel_watermark_params * display ,
const struct intel_watermark_params * cursor ,
int * display_wm , int * cursor_wm )
{
struct drm_crtc * crtc ;
int hdisplay , htotal , pixel_size , clock ;
unsigned long line_time_us ;
int line_count , line_size ;
int small , large ;
int entries ;
if ( ! latency_ns ) {
* display_wm = * cursor_wm = 0 ;
return false ;
}
crtc = intel_get_crtc_for_plane ( dev , plane ) ;
hdisplay = crtc - > mode . hdisplay ;
htotal = crtc - > mode . htotal ;
clock = crtc - > mode . clock ;
pixel_size = crtc - > fb - > bits_per_pixel / 8 ;
line_time_us = ( htotal * 1000 ) / clock ;
line_count = ( latency_ns / line_time_us + 1000 ) / 1000 ;
line_size = hdisplay * pixel_size ;
/* Use the minimum of the small and large buffer method for primary */
small = ( ( clock * pixel_size / 1000 ) * latency_ns ) / 1000 ;
large = line_count * line_size ;
entries = DIV_ROUND_UP ( min ( small , large ) , display - > cacheline_size ) ;
* display_wm = entries + display - > guard_size ;
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64 ;
entries = DIV_ROUND_UP ( entries , cursor - > cacheline_size ) ;
* cursor_wm = entries + cursor - > guard_size ;
return g4x_check_srwm ( dev ,
* display_wm , * cursor_wm ,
display , cursor ) ;
}
static bool vlv_compute_drain_latency ( struct drm_device * dev ,
int plane ,
int * plane_prec_mult ,
int * plane_dl ,
int * cursor_prec_mult ,
int * cursor_dl )
{
struct drm_crtc * crtc ;
int clock , pixel_size ;
int entries ;
crtc = intel_get_crtc_for_plane ( dev , plane ) ;
if ( crtc - > fb = = NULL | | ! crtc - > enabled )
return false ;
clock = crtc - > mode . clock ; /* VESA DOT Clock */
pixel_size = crtc - > fb - > bits_per_pixel / 8 ; /* BPP */
entries = ( clock / 1000 ) * pixel_size ;
* plane_prec_mult = ( entries > 256 ) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16 ;
* plane_dl = ( 64 * ( * plane_prec_mult ) * 4 ) / ( ( clock / 1000 ) *
pixel_size ) ;
entries = ( clock / 1000 ) * 4 ; /* BPP is always 4 for cursor */
* cursor_prec_mult = ( entries > 256 ) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16 ;
* cursor_dl = ( 64 * ( * cursor_prec_mult ) * 4 ) / ( ( clock / 1000 ) * 4 ) ;
return true ;
}
/*
* Update drain latency registers of memory arbiter
*
* Valleyview SoC has a new memory arbiter and needs drain latency registers
* to be programmed . Each plane has a drain latency multiplier and a drain
* latency value .
*/
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		/* Translate the raw precision multiplier into the DDL
		 * register's precision-select bits for cursor/plane A. */
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		/* Pack cursor A and plane A fields into a single DDL1 write. */
		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}
# define single_plane_enabled(mask) is_power_of_2(mask)
/*
 * Recompute and program the VLV FIFO watermarks for both pipes, plus the
 * self-refresh watermark when exactly one plane is enabled.
 */
void valleyview_update_wm(struct drm_device *dev)
{
	/* assumed memory latency while in self-refresh */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of planes with a valid WM0 */

	vlv_update_drain_latency(dev);

	/* g4x_compute_wm0 fills the out-params even on failure, so the
	 * DSPFW writes below always see initialized values. */
	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	/* Self-refresh is only usable when exactly one plane is enabled. */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* NOTE(review): this ORs the cursor SR field without clearing the
	 * previous value — confirm that is intended (g4x masks first). */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
/*
 * Recompute and program the G4x FIFO watermarks for both pipes, plus the
 * self-refresh watermark when exactly one plane is enabled.
 */
void g4x_update_wm(struct drm_device *dev)
{
	/* assumed memory latency while in self-refresh */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of planes with a valid WM0 */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	/* Self-refresh is only usable when exactly one plane is enabled. */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
/*
 * Program i965-class watermarks.  Non-SR plane watermarks are fixed at 8;
 * the self-refresh watermarks are computed only for the single-pipe case.
 */
void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback SR watermark */
	int cursor_sr = 16;	/* fallback cursor SR watermark */

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* SR field of DSPFW1 is 9 bits wide */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Same calculation for the cursor; surface width is 64. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		/* NOTE(review): cursor_sr is signed while max_wm is
		 * unsigned; a negative cursor_sr would compare as huge and
		 * be clamped to max_wm here — confirm that's intended. */
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
/*
 * Program gen2/gen3 FIFO watermarks.  Per-plane watermarks are computed
 * from each enabled CRTC's mode; the self-refresh watermark is computed
 * only when exactly one plane is enabled ("enabled" tracks that CRTC, and
 * is reset to NULL when both planes are active).
 */
void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the FIFO parameters for this generation. */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		/* disabled plane: park the watermark at the FIFO top */
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			/* both planes active: no single-plane SR candidate */
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		/* presumably the MASK bits gate updating the EN field of
		 * this write-masked register — confirm against the PRM */
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are in place. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
void i830_update_wm ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
struct drm_crtc * crtc ;
uint32_t fwater_lo ;
int planea_wm ;
crtc = single_enabled_crtc ( dev ) ;
if ( crtc = = NULL )
return ;
planea_wm = intel_calculate_wm ( crtc - > mode . clock , & i830_wm_info ,
dev_priv - > display . get_fifo_size ( dev , 0 ) ,
crtc - > fb - > bits_per_pixel / 8 ,
latency_ns ) ;
fwater_lo = I915_READ ( FW_BLC ) & ~ 0xfff ;
fwater_lo | = ( 3 < < 8 ) | planea_wm ;
DRM_DEBUG_KMS ( " Setting FIFO watermarks - A: %d \n " , planea_wm ) ;
I915_WRITE ( FW_BLC , fwater_lo ) ;
}
# define ILK_LP0_PLANE_LATENCY 700
# define ILK_LP0_CURSOR_LATENCY 1300
/*
* Check the wm result .
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register , that watermark
* must be disabled .
*/
static bool ironlake_check_srwm ( struct drm_device * dev , int level ,
int fbc_wm , int display_wm , int cursor_wm ,
const struct intel_watermark_params * display ,
const struct intel_watermark_params * cursor )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
DRM_DEBUG_KMS ( " watermark %d: display plane %d, fbc lines %d, "
" cursor %d \n " , level , display_wm , fbc_wm , cursor_wm ) ;
if ( fbc_wm > SNB_FBC_MAX_SRWM ) {
DRM_DEBUG_KMS ( " fbc watermark(%d) is too large(%d), disabling wm%d+ \n " ,
fbc_wm , SNB_FBC_MAX_SRWM , level ) ;
/* fbc has it's own way to disable FBC WM */
I915_WRITE ( DISP_ARB_CTL ,
I915_READ ( DISP_ARB_CTL ) | DISP_FBC_WM_DIS ) ;
return false ;
}
if ( display_wm > display - > max_wm ) {
DRM_DEBUG_KMS ( " display watermark(%d) is too large(%d), disabling wm%d+ \n " ,
display_wm , SNB_DISPLAY_MAX_SRWM , level ) ;
return false ;
}
if ( cursor_wm > cursor - > max_wm ) {
DRM_DEBUG_KMS ( " cursor watermark(%d) is too large(%d), disabling wm%d+ \n " ,
cursor_wm , SNB_CURSOR_MAX_SRWM , level ) ;
return false ;
}
if ( ! ( fbc_wm | | display_wm | | cursor_wm ) ) {
DRM_DEBUG_KMS ( " latency %d is 0, disabling wm%d+ \n " , level , level ) ;
return false ;
}
return true ;
}
/*
 * Compute watermark values for WM[1-3].
 */
static bool ironlake_compute_srwm ( struct drm_device * dev , int level , int plane ,
int latency_ns ,
const struct intel_watermark_params * display ,
const struct intel_watermark_params * cursor ,
int * fbc_wm , int * display_wm , int * cursor_wm )
{
struct drm_crtc * crtc ;
unsigned long line_time_us ;
int hdisplay , htotal , pixel_size , clock ;
int line_count , line_size ;
int small , large ;
int entries ;
if ( ! latency_ns ) {
* fbc_wm = * display_wm = * cursor_wm = 0 ;
return false ;
}
crtc = intel_get_crtc_for_plane ( dev , plane ) ;
hdisplay = crtc - > mode . hdisplay ;
htotal = crtc - > mode . htotal ;
clock = crtc - > mode . clock ;
pixel_size = crtc - > fb - > bits_per_pixel / 8 ;
line_time_us = ( htotal * 1000 ) / clock ;
line_count = ( latency_ns / line_time_us + 1000 ) / 1000 ;
line_size = hdisplay * pixel_size ;
/* Use the minimum of the small and large buffer method for primary */
small = ( ( clock * pixel_size / 1000 ) * latency_ns ) / 1000 ;
large = line_count * line_size ;
entries = DIV_ROUND_UP ( min ( small , large ) , display - > cacheline_size ) ;
* display_wm = entries + display - > guard_size ;
/*
* Spec says :
* FBC WM = ( ( Final Primary WM * 64 ) / number of bytes per line ) + 2
*/
* fbc_wm = DIV_ROUND_UP ( * display_wm * 64 , line_size ) + 2 ;
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64 ;
entries = DIV_ROUND_UP ( entries , cursor - > cacheline_size ) ;
* cursor_wm = entries + cursor - > guard_size ;
return ironlake_check_srwm ( dev , level ,
* fbc_wm , * display_wm , * cursor_wm ,
display , cursor ) ;
}
/*
 * Recompute and program Ironlake WM0 for each pipe, and the WM1/WM2
 * self-refresh levels when exactly one plane is enabled.
 */
void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with a valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the single-bit mask into a plane index */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
void sandybridge_update_wm ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
int latency = SNB_READ_WM0_LATENCY ( ) * 100 ; /* In unit 0.1us */
u32 val ;
int fbc_wm , plane_wm , cursor_wm ;
unsigned int enabled ;
enabled = 0 ;
if ( g4x_compute_wm0 ( dev , 0 ,
& sandybridge_display_wm_info , latency ,
& sandybridge_cursor_wm_info , latency ,
& plane_wm , & cursor_wm ) ) {
val = I915_READ ( WM0_PIPEA_ILK ) ;
val & = ~ ( WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK ) ;
I915_WRITE ( WM0_PIPEA_ILK , val |
( ( plane_wm < < WM0_PIPE_PLANE_SHIFT ) | cursor_wm ) ) ;
DRM_DEBUG_KMS ( " FIFO watermarks For pipe A - "
" plane %d, " " cursor: %d \n " ,
plane_wm , cursor_wm ) ;
enabled | = 1 ;
}
if ( g4x_compute_wm0 ( dev , 1 ,
& sandybridge_display_wm_info , latency ,
& sandybridge_cursor_wm_info , latency ,
& plane_wm , & cursor_wm ) ) {
val = I915_READ ( WM0_PIPEB_ILK ) ;
val & = ~ ( WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK ) ;
I915_WRITE ( WM0_PIPEB_ILK , val |
( ( plane_wm < < WM0_PIPE_PLANE_SHIFT ) | cursor_wm ) ) ;
DRM_DEBUG_KMS ( " FIFO watermarks For pipe B - "
" plane %d, cursor: %d \n " ,
plane_wm , cursor_wm ) ;
enabled | = 2 ;
}
/* IVB has 3 pipes */
if ( IS_IVYBRIDGE ( dev ) & &
g4x_compute_wm0 ( dev , 2 ,
& sandybridge_display_wm_info , latency ,
& sandybridge_cursor_wm_info , latency ,
& plane_wm , & cursor_wm ) ) {
val = I915_READ ( WM0_PIPEC_IVB ) ;
val & = ~ ( WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK ) ;
I915_WRITE ( WM0_PIPEC_IVB , val |
( ( plane_wm < < WM0_PIPE_PLANE_SHIFT ) | cursor_wm ) ) ;
DRM_DEBUG_KMS ( " FIFO watermarks For pipe C - "
" plane %d, cursor: %d \n " ,
plane_wm , cursor_wm ) ;
enabled | = 3 ;
}
/*
* Calculate and update the self - refresh watermark only when one
* display plane is used .
*
* SNB support 3 levels of watermark .
*
* WM1 / WM2 / WM2 watermarks have to be enabled in the ascending order ,
* and disabled in the descending order
*
*/
I915_WRITE ( WM3_LP_ILK , 0 ) ;
I915_WRITE ( WM2_LP_ILK , 0 ) ;
I915_WRITE ( WM1_LP_ILK , 0 ) ;
if ( ! single_plane_enabled ( enabled ) | |
dev_priv - > sprite_scaling_enabled )
return ;
enabled = ffs ( enabled ) - 1 ;
/* WM1 */
if ( ! ironlake_compute_srwm ( dev , 1 , enabled ,
SNB_READ_WM1_LATENCY ( ) * 500 ,
& sandybridge_display_srwm_info ,
& sandybridge_cursor_srwm_info ,
& fbc_wm , & plane_wm , & cursor_wm ) )
return ;
I915_WRITE ( WM1_LP_ILK ,
WM1_LP_SR_EN |
( SNB_READ_WM1_LATENCY ( ) < < WM1_LP_LATENCY_SHIFT ) |
( fbc_wm < < WM1_LP_FBC_SHIFT ) |
( plane_wm < < WM1_LP_SR_SHIFT ) |
cursor_wm ) ;
/* WM2 */
if ( ! ironlake_compute_srwm ( dev , 2 , enabled ,
SNB_READ_WM2_LATENCY ( ) * 500 ,
& sandybridge_display_srwm_info ,
& sandybridge_cursor_srwm_info ,
& fbc_wm , & plane_wm , & cursor_wm ) )
return ;
I915_WRITE ( WM2_LP_ILK ,
WM2_LP_EN |
( SNB_READ_WM2_LATENCY ( ) < < WM1_LP_LATENCY_SHIFT ) |
( fbc_wm < < WM1_LP_FBC_SHIFT ) |
( plane_wm < < WM1_LP_SR_SHIFT ) |
cursor_wm ) ;
/* WM3 */
if ( ! ironlake_compute_srwm ( dev , 3 , enabled ,
SNB_READ_WM3_LATENCY ( ) * 500 ,
& sandybridge_display_srwm_info ,
& sandybridge_cursor_srwm_info ,
& fbc_wm , & plane_wm , & cursor_wm ) )
return ;
I915_WRITE ( WM3_LP_ILK ,
WM3_LP_EN |
( SNB_READ_WM3_LATENCY ( ) < < WM1_LP_LATENCY_SHIFT ) |
( fbc_wm < < WM1_LP_FBC_SHIFT ) |
( plane_wm < < WM1_LP_SR_SHIFT ) |
cursor_wm ) ;
}
/*
 * Compute the WM0 sprite watermark for @plane using the small-buffer
 * method.  Returns false (with *sprite_wm set to the guard size) when the
 * plane's CRTC has no fb or is disabled.
 */
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc = intel_get_crtc_for_plane(dev, plane);
	int dotclock;
	int entries, tlb_miss;

	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	dotclock = crtc->mode.clock;

	/* FIFO entries drained during the latency window, padded when the
	 * sprite span leaves room for extra TLB misses. */
	entries = ((dotclock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;

	entries = DIV_ROUND_UP(entries, display->cacheline_size);

	/* Clamp to the register's maximum programmable value. */
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
/*
 * Compute a self-refresh sprite watermark for the given latency.
 * Returns false (with *sprite_wm zeroed) when the latency, mode clock, or
 * line time is zero, or when the result exceeds the 10-bit register field.
 */
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	/* NOTE(review): line time is derived from sprite_width here rather
	 * than htotal as in the other *_compute_srwm helpers — confirm
	 * against the hardware spec. */
	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	/* The sprite SR watermark field is 10 bits wide. */
	return *sprite_wm > 0x3ff ? false : true;
}
/*
 * Recompute and program the sprite watermarks for @pipe: the WM0 sprite
 * field, the LP1 sprite watermark, and (IVB only) the LP2/LP3 sprite
 * watermarks.  Bails out, leaving lower levels programmed, as soon as a
 * level fails to compute.
 */
void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
				  uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	/* Map the pipe to its WM0 register. */
	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	/* Read-modify-write: only replace the sprite field of WM0. */
	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration .
*
* There are several cases to deal with here :
* - normal ( i . e . non - self - refresh )
* - self - refresh ( SR ) mode
* - lines are large relative to FIFO size ( buffer can hold up to 2 )
* - lines are small relative to FIFO size ( buffer can hold more than 2
* lines ) , so need to account for TLB latency
*
* The normal calculation is :
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent ( we assume pessimal
* values here ) .
*
* The SR calculation is :
* watermark = ( trunc ( latency / line time ) + 1 ) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high , as above .
*
* The final value programmed to the register should always be rounded up ,
* and include an extra 2 entries to account for clock crossings .
*
* We don ' t use the sprite , so we can ignore that . And on Crestline we have
* to set the non - SR watermarks to 8.
*/
void intel_update_watermarks ( struct drm_device * dev )
{
struct drm_i915_private * dev_priv = dev - > dev_private ;
if ( dev_priv - > display . update_wm )
dev_priv - > display . update_wm ( dev ) ;
}
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Dispatch to the platform-specific sprite watermark routine. */
	if (dev_priv->display.update_sprite_wm == NULL)
		return;

	dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
					   pixel_size);
}