Merge branch 'drm-radeon-lockup' into drm-core-next

* drm-radeon-lockup:
  drm/radeon/kms: simplify & improve GPU reset V2
  drm/radeon/kms: rename gpu_reset to asic_reset
  drm/radeon/kms: fence cleanup + more reliable GPU lockup detection V4

Conflicts:
	drivers/gpu/drm/radeon/r300.c
Dave Airlie 2010-04-20 13:15:05 +10:00
commit 0bcb1d844a
23 changed files with 801 additions and 473 deletions
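The series below splits the old gpu_reset() asic hook into a detection hook (gpu_is_lockup) and a recovery hook (asic_reset), and moves actual recovery into the fence wait path. A condensed sketch of the resulting flow; this helper does not exist in the patch, it only strings together the real calls from the radeon_fence.c and radeon_device.c hunks:

/* condensed view of the new recovery path (illustrative helper only) */
static int radeon_handle_fence_timeout(struct radeon_device *rdev,
				       struct radeon_fence *fence, u32 seq)
{
	int r;

	if (seq != rdev->fence_drv.last_seq || !radeon_gpu_is_lockup(rdev))
		return 0;			/* either progress or just slow, keep waiting */
	/* fence made no progress and the asic-specific check agrees: lockup */
	rdev->gpu_lockup = true;
	WREG32(rdev->fence_drv.scratch_reg, fence->seq);	/* unblock waiters */
	r = radeon_gpu_reset(rdev);		/* suspend + asic_reset() + resume */
	if (!r)
		rdev->gpu_lockup = false;
	return r;
}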


@ -487,7 +487,13 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
int evergreen_gpu_reset(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
return false;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
return 0;


@ -663,26 +663,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
/* Reset CP */
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
}
} else {
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
}
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@ -787,39 +767,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
int r100_cp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 16))) {
DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
}
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@ -1733,76 +1680,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
void r100_gpu_init(struct radeon_device *rdev)
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
/* TODO: anythings to do here ? pipes ? */
r100_hdp_reset(rdev);
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
}
void r100_hdp_reset(struct radeon_device *rdev)
/**
 * r100_gpu_cp_is_lockup() - check whether the CP is locked up by comparing
 * against previously recorded information
 * @rdev: radeon device structure
 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
 * @cp: radeon_cp structure holding CP information
 *
 * We don't need to initialize the lockup tracking information: either the CP
 * rptr will differ from the recorded value or jiffies will have wrapped
 * around, and both cases force (re)initialization of the tracking information.
 *
 * A possible false positive is if we get called after a long while and
 * last_cp_rptr happens to equal the current CP rptr; unlikely, but it can
 * happen. To avoid this, if the elapsed time since the last call is bigger
 * than 2 seconds we return false and update the tracking information. As a
 * consequence the caller must call r100_gpu_cp_is_lockup several times in
 * less than 2 seconds for a lockup to be reported; the fencing code should be
 * cautious about that.
 *
 * The caller should also write to the ring to force the CP to do something,
 * so that we don't get a false positive when the CP simply has nothing to do.
 *
 **/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
uint32_t tmp;
unsigned long cjiffies, elapsed;
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
tmp |= (7 << 28);
WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
(void)RREG32(RADEON_HOST_PATH_CNTL);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
WREG32(RADEON_HOST_PATH_CNTL, tmp);
(void)RREG32(RADEON_HOST_PATH_CNTL);
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (cp->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
if (elapsed >= 3000) {
/* very likely the improbable case where the current rptr
 * equals an rptr recorded a long while ago; this is more
 * likely a false positive, so update the tracking
 * information, which will force us to be called again at
 * a later point
 */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (elapsed >= 1000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
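To make the thresholds above concrete, a minimal standalone model of the heuristic follows; the names lockup_state and cp_is_lockup are invented for illustration, and the loop samples at the rough 500 ms cadence the fence code uses:

#include <stdbool.h>
#include <stdio.h>

struct lockup_state {
	unsigned long last_ms;	/* models lockup->last_jiffies */
	unsigned int last_rptr;	/* models lockup->last_cp_rptr */
};

/* Userspace model of r100_gpu_cp_is_lockup(): the CP is considered locked
 * up only if rptr has not moved for more than one second but less than the
 * three-second "stale sample" cutoff. */
static bool cp_is_lockup(struct lockup_state *s, unsigned int rptr, unsigned long now_ms)
{
	unsigned long elapsed;

	if (rptr != s->last_rptr) {	/* CP still consuming the ring */
		s->last_rptr = rptr;
		s->last_ms = now_ms;
		return false;
	}
	elapsed = now_ms - s->last_ms;
	if (elapsed >= 3000) {		/* sample too old, treat as false positive */
		s->last_ms = now_ms;
		return false;
	}
	return elapsed >= 1000;		/* stalled for 1-3 s: report a lockup */
}

int main(void)
{
	struct lockup_state s = { .last_ms = 0, .last_rptr = 42 };
	unsigned long t;

	/* rptr stuck at 42 while time advances in 500 ms fence-poll steps */
	for (t = 500; t <= 2000; t += 500)
		printf("t=%4lums lockup=%d\n", t, cp_is_lockup(&s, 42, t));
	return 0;
}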
int r100_rb2d_reset(struct radeon_device *rdev)
bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
int i;
u32 rbbm_status;
int r;
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 26))) {
DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
}
int r100_gpu_reset(struct radeon_device *rdev)
int r100_asic_reset(struct radeon_device *rdev)
{
uint32_t status;
struct r100_mc_save save;
u32 status, tmp;
/* reset order likely matters */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
/* TODO: reset 3D engine */
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
S_0000F0_SOFT_RESET_RE(1) |
S_0000F0_SOFT_RESET_PP(1) |
S_0000F0_SOFT_RESET_RB(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
r100_mc_resume(rdev, &save);
dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@ -3399,7 +3433,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
r100_gpu_init(rdev);
// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@ -3436,7 +3470,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -3505,7 +3539,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),


@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define R_000030_BUS_CNTL 0x000030
#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
#define C_000030_BUS_SUSPEND 0xFFBFFFFF
#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
#define C_000030_LAT_16X 0xFF7FFFFF
#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
#define C_000030_ENFRCWRDY 0xFDFFFFFF
#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
#define C_000030_SERR_EN 0xDFFFFFFF
#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
#define C_000030_BUS_READ_BURST 0xBFFFFFFF
#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
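The register headers touched by this series (r100d.h above, r300d.h and rs600d.h further down) follow the driver's usual three-macro convention per bit field. A short orientation sketch using names from those hunks; the helper itself is hypothetical and not part of the patch:

/* illustrative only: shows how the R_/S_/G_/C_ macros compose */
static bool rbbm_gui_idle_and_bm_off(struct radeon_device *rdev)
{
	u32 status, tmp;

	status = RREG32(R_000E40_RBBM_STATUS);	/* R_*: register offset */
	if (G_000E40_GUI_ACTIVE(status))	/* G_*: extract a field */
		return false;
	tmp = RREG32(R_000030_BUS_CNTL);
	tmp &= C_000030_BUS_MASTER_DIS;		/* C_*: mask the field out */
	tmp |= S_000030_BUS_MASTER_DIS(1);	/* S_*: build a field value */
	WREG32(R_000030_BUS_CNTL, tmp);		/* net effect: disable bus mastering */
	return true;
}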


@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
r100_hdp_reset(rdev);
/* FIXME: rv380 one pipes ? */
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350)) {
@ -376,89 +380,93 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
int r300_ga_reset(struct radeon_device *rdev)
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
u32 rbbm_status;
int r;
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (tmp & ((1 << 20) | (1 << 26))) {
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(R300_RE_SCISSORS_TL, 0);
WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
break;
}
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
return -1;
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r300_gpu_reset(struct radeon_device *rdev)
int r300_asic_reset(struct radeon_device *rdev)
{
uint32_t status;
struct r100_mc_save save;
u32 status, tmp;
/* reset order likely matters */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
}
/* reset GA */
if (status & ((1 << 20) | (1 << 26))) {
r300_ga_reset(rdev);
}
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* resetting the CP seems to be problematic: sometimes it ends up
 * hard locking the computer, but it's necessary for a successful
 * reset. More testing & experimenting is needed on R3XX/R4XX to
 * find a reliable solution (if there is any)
 */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset MC */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
r100_mc_resume(rdev, &save);
dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
/*
* r300,r350,rv350,rv380 VRAM info
*/
@ -1317,7 +1325,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -1388,7 +1396,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),


@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)


@ -235,7 +235,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -316,7 +316,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),


@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),


@ -750,7 +750,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@ -765,7 +764,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@ -784,72 +783,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
/* Reset others GPU block if necessary */
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_IH(1);
if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
/* After reset we need to reinit the asic as GPU often ends up in an
* incoherent state.
*/
atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
int r600_gpu_reset(struct radeon_device *rdev)
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
int r;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}


@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
/* don't reinitialize blit */
if (rdev->r600_blit.shader_obj)
return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;


@ -99,6 +99,7 @@ extern int radeon_hw_i2c;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@ -182,7 +183,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
unsigned long count_timeout;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@ -197,7 +199,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
unsigned long timeout;
bool emited;
bool signaled;
};
@ -746,7 +747,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*gpu_reset)(struct radeon_device *rdev);
bool (*gpu_is_lockup)(struct radeon_device *rdev);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@ -804,59 +806,68 @@ struct radeon_asic {
/*
* Asic structures
*/
struct r100_gpu_lockup {
unsigned long last_jiffies;
u32 last_cp_rptr;
};
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
struct r300_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
struct r600_asic {
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
unsigned max_backends;
unsigned max_gprs;
unsigned max_threads;
unsigned max_stack_entries;
unsigned max_hw_contexts;
unsigned max_gs_threads;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
unsigned max_backends;
unsigned max_gprs;
unsigned max_threads;
unsigned max_stack_entries;
unsigned max_hw_contexts;
unsigned max_gs_threads;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
struct r100_gpu_lockup lockup;
};
struct rv770_asic {
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
unsigned max_backends;
unsigned max_gprs;
unsigned max_threads;
unsigned max_stack_entries;
unsigned max_hw_contexts;
unsigned max_gs_threads;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_fize;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
unsigned max_backends;
unsigned max_gprs;
unsigned max_threads;
unsigned max_stack_entries;
unsigned max_hw_contexts;
unsigned max_gs_threads;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_fize;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@ -1145,7 +1156,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@ -1176,6 +1188,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
@ -1200,6 +1213,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@ -1276,7 +1291,7 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);


@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -172,7 +173,8 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -209,7 +211,8 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -247,7 +250,8 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -284,7 +288,8 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -322,7 +327,8 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -360,7 +366,8 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -398,7 +405,8 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -436,7 +444,8 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -474,7 +483,8 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@ -513,7 +523,8 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@ -549,8 +560,9 @@ static struct radeon_asic rs780_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@ -586,7 +598,8 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.gpu_reset = &rv770_gpu_reset,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
@ -623,7 +636,8 @@ static struct radeon_asic evergreen_asic = {
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.cp_commit = NULL,
.gpu_reset = &evergreen_gpu_reset,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,


@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
int r100_gpu_reset(struct radeon_device *rdev);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@ -126,7 +125,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
void r100_bm_disable(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
*/
@ -134,7 +133,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@ -143,7 +142,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_gpu_reset(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@ -178,6 +178,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/*
* rs600.
*/
extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@ -212,7 +213,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@ -252,7 +252,8 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
int r600_gpu_reset(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@ -276,7 +277,6 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);
/*
* evergreen
@ -285,7 +285,8 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
int evergreen_gpu_reset(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);


@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
if (rdev->gpu_lockup) {
mutex_unlock(&rdev->cs_mutex);
return -EINVAL;
}
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;


@ -671,7 +671,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
radeon_gpu_reset(rdev);
radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@ -691,6 +691,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
@ -789,6 +791,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
int radeon_gpu_reset(struct radeon_device *rdev)
{
int r;
radeon_save_bios_scratch_regs(rdev);
radeon_suspend(rdev);
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeed\n");
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
return 0;
}
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
return r;
}
/*
* Debugfs

View file

@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
unsigned long cjiffies;
if (rdev == NULL) {
return true;
}
if (rdev->shutdown) {
return true;
}
seq = RREG32(rdev->fence_drv.scratch_reg);
rdev->fence_drv.last_seq = seq;
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
cjiffies -= rdev->fence_drv.last_jiffies;
if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
/* update the timeout */
rdev->fence_drv.last_timeout -= cjiffies;
} else {
/* the 500ms timeout has elapsed, we should test
* for GPU lockup
*/
rdev->fence_drv.last_timeout = 1;
}
} else {
/* jiffies wrapped around; update last jiffies, we will just wait
* a little longer
*/
rdev->fence_drv.last_jiffies = cjiffies;
}
return false;
}
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
unsigned long cur_jiffies;
unsigned long timeout;
bool expired = false;
unsigned long irq_flags, timeout;
u32 seq;
int r;
if (fence == NULL) {
@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
timeout = rdev->fence_drv.last_timeout;
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
if (time_after(fence->timeout, cur_jiffies)) {
timeout = fence->timeout - cur_jiffies;
}
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0))
if (unlikely(r < 0)) {
return r;
}
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@ -206,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
expired = true;
/* we were interrupted for some reason and the fence
 * isn't signaled yet, resume the wait
*/
if (r) {
timeout = r;
goto retry;
}
if (unlikely(expired)) {
timeout = 1;
if (time_after(cur_jiffies, fence->timeout)) {
timeout = cur_jiffies - fence->timeout;
}
timeout = jiffies_to_msecs(timeout);
if (timeout > 500) {
DRM_ERROR("fence(%p:0x%08X) %lums timeout "
"going to reset GPU\n",
fence, fence->seq, timeout);
radeon_gpu_reset(rdev);
WREG32(rdev->fence_drv.scratch_reg, fence->seq);
}
/* don't protect read access to rdev->fence_drv.last_seq
* if we are experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
*/
rdev->gpu_lockup = true;
WREG32(rdev->fence_drv.scratch_reg, fence->seq);
r = radeon_gpu_reset(rdev);
if (r)
return r;
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv.last_jiffies = jiffies;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
if (unlikely(expired)) {
rdev->fence_drv.count_timeout++;
cur_jiffies = jiffies;
timeout = 1;
if (time_after(cur_jiffies, fence->timeout)) {
timeout = cur_jiffies - fence->timeout;
}
timeout = jiffies_to_msecs(timeout);
DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
fence, fence->seq, timeout);
DRM_ERROR("last signaled fence(0x%08X)\n",
rdev->fence_drv.last_seq);
}
return 0;
}
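The timing constants tie together: RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) sets how long a waiter sleeps before re-checking, while the 1000 ms threshold in r100_gpu_cp_is_lockup() decides when a stalled rptr counts as a lockup. A tiny standalone calculation of that relationship, assuming an example HZ of 250:

#include <stdio.h>

int main(void)
{
	const unsigned int hz = 250;				/* example CONFIG_HZ */
	const unsigned int wait_jiffies = hz / 2;		/* RADEON_FENCE_JIFFIES_TIMEOUT */
	const unsigned int wait_ms = wait_jiffies * 1000 / hz;	/* ~500 ms per re-check */
	const unsigned int report_ms = 1000;			/* threshold in r100_gpu_cp_is_lockup() */

	printf("fence re-check period: %u jiffies (~%u ms)\n", wait_jiffies, wait_ms);
	printf("re-checks of a stuck CP before a lockup is reported: at least %u\n",
	       (report_ms + wait_ms - 1) / wait_ms);
	return 0;
}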
@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);


@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
DRM_ERROR("trying to bind memory to unitialized GART !\n");
WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;


@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -497,7 +495,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),


@ -147,6 +147,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
void rs600_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
/* disable bus mastering */
pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
}
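For reference, the magic numbers in rs600_bm_disable() (and the identical pair in r100_bm_disable() earlier) address the standard PCI command register: offset 0x4 is PCI_COMMAND and masking with 0xFFFB clears the Bus Master Enable bit. An equivalent sketch using the symbolic constants; this is illustrative only, not what the patch does:

#include <linux/pci.h>

static void bm_disable_symbolic(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);		/* PCI_COMMAND == 0x4 */
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MASTER);
}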
int rs600_asic_reset(struct radeon_device *rdev)
{
u32 status, tmp;
struct rv515_mc_save save;
/* Stops all mc clients */
rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
pci_save_state(rdev->pdev);
/* disable bus mastering */
rs600_bm_disable(rdev);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset MC */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
pci_restore_state(rdev->pdev);
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
return -1;
}
rv515_mc_resume(rdev, &save);
dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
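Elsewhere in the series this function is presumably wired into the per-asic callback table through the renamed .asic_reset hook; a rough sketch of such an entry (everything beyond .asic_reset is assumed, not taken from this diff):
/* Illustrative only; the real table lives outside this hunk. */
static struct radeon_asic rs600_asic_sketch = {
	.asic_reset = &rs600_asic_reset,
	/* ... remaining callbacks unchanged ... */
};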
/*
* GART.
*/
@ -454,7 +526,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@ -601,7 +672,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -664,7 +735,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),

View file

@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
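These S_/G_/C_ helpers follow the driver's usual convention (S_ builds a field, G_ extracts it, C_ is its clear mask); a small usage sketch, not part of the commit, mirroring the reset sequence shown earlier:
/* Usage sketch only, against the GA soft reset field. */
static void rbbm_soft_reset_ga_sketch(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_GA(1));
	tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);	/* read back to post the write */
	if (G_0000F0_SOFT_RESET_GA(tmp))
		/* clear just the GA field and deassert the reset */
		WREG32(R_0000F0_RBBM_SOFT_RESET, tmp & C_0000F0_SOFT_RESET_GA);
}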

View file

@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs690 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -717,7 +715,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),

View file

@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
r100_hdp_reset(rdev);
r100_rb2d_reset(rdev);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
rv515_vga_render_disable(rdev);
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
int rv515_ga_reset(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
WREG32(CP_CSQ_MODE, 0);
WREG32(CP_CSQ_CNTL, 0);
WREG32(RBBM_SOFT_RESET, 0x32005);
(void)RREG32(RBBM_SOFT_RESET);
udelay(200);
WREG32(RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RBBM_STATUS);
if (tmp & ((1 << 20) | (1 << 26))) {
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(0x43E0, 0);
WREG32(0x43E4, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
break;
}
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RBBM_STATUS);
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
return -1;
}
int rv515_gpu_reset(struct radeon_device *rdev)
{
uint32_t status;
/* reset order likely matter */
status = RREG32(RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
}
/* reset GA */
if (status & ((1 << 20) | (1 << 26))) {
rv515_ga_reset(rdev);
}
/* reset CP */
status = RREG32(RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
/* Check if GPU is idle */
status = RREG32(RBBM_STATUS);
if (status & (1 << 31)) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
return 0;
}
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
radeon_gpu_reset(rdev);
radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -573,7 +483,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),

View file

@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)

View file

@ -917,12 +917,6 @@ int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
return r600_gpu_reset(rdev);
}
static int rv770_startup(struct radeon_device *rdev)
{
int r;