drm/radeon/kms: don't require up to 64k allocations. (v2)

This avoids needing to do a kmalloc > PAGE_SIZE for the main
indirect buffer chunk; it adds an accessor for all reads from
the chunk and caches a single page at a time for subsequent
reads.

changes since v1:
Use a two-page pool, which should cover the most common case.
A single packet spanning > PAGE_SIZE could still be hit, but I'm
having trouble seeing anywhere we currently generate anything like that.
Hopefully this does proper short-page copying at the end.
Added a parser_error flag to record deep errors instead of having to test
every IB value fetch.
Fixed a bug in the version of the patch that went to the list.

Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in:
Dave Airlie 2009-09-23 16:56:27 +10:00 committed by Dave Airlie
parent 35e4b7af21
commit 513bcb4655
7 changed files with 370 additions and 271 deletions

View file

@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
void r100_cs_dump_packet(struct radeon_cs_parser *p, void r100_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt) struct radeon_cs_packet *pkt)
{ {
struct radeon_cs_chunk *ib_chunk;
volatile uint32_t *ib; volatile uint32_t *ib;
unsigned i; unsigned i;
unsigned idx; unsigned idx;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx; idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) { for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
idx, ib_chunk->length_dw); idx, ib_chunk->length_dw);
return -EINVAL; return -EINVAL;
} }
header = ib_chunk->kdata[idx]; header = radeon_get_ib_value(p, idx);
pkt->idx = idx; pkt->idx = idx;
pkt->type = CP_PACKET_GET_TYPE(header); pkt->type = CP_PACKET_GET_TYPE(header);
pkt->count = CP_PACKET_GET_COUNT(header); pkt->count = CP_PACKET_GET_COUNT(header);
@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
*/ */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{ {
struct radeon_cs_chunk *ib_chunk;
struct drm_mode_object *obj; struct drm_mode_object *obj;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc; struct radeon_crtc *radeon_crtc;
@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
int crtc_id; int crtc_id;
int r; int r;
uint32_t header, h_idx, reg; uint32_t header, h_idx, reg;
volatile uint32_t *ib;
ib_chunk = &p->chunks[p->chunk_ib_idx]; ib = p->ib->ptr;
/* parse the wait until */ /* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx); r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@ -963,7 +961,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return r; return r;
} }
if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n"); DRM_ERROR("vline wait had illegal wait until\n");
r = -EINVAL; r = -EINVAL;
return r; return r;
@ -978,9 +976,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
p->idx += waitreloc.count; p->idx += waitreloc.count;
p->idx += p3reloc.count; p->idx += p3reloc.count;
header = ib_chunk->kdata[h_idx]; header = radeon_get_ib_value(p, h_idx);
crtc_id = ib_chunk->kdata[h_idx + 5]; crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = ib_chunk->kdata[h_idx] >> 2; reg = header >> 2;
mutex_lock(&p->rdev->ddev->mode_config.mutex); mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) { if (!obj) {
@ -994,8 +992,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (!crtc->enabled) { if (!crtc->enabled) {
/* if the CRTC isn't enabled - we need to nop out the wait until */ /* if the CRTC isn't enabled - we need to nop out the wait until */
ib_chunk->kdata[h_idx + 2] = PACKET2(0);
ib_chunk->kdata[h_idx + 3] = PACKET2(0); ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
} else if (crtc_id == 1) { } else if (crtc_id == 1) {
switch (reg) { switch (reg) {
case AVIVO_D1MODE_VLINE_START_END: case AVIVO_D1MODE_VLINE_START_END:
@ -1011,8 +1010,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
ib_chunk->kdata[h_idx] = header; ib[h_idx] = header;
ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
} }
out: out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex); mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@ -1033,7 +1032,6 @@ out:
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc) struct radeon_cs_reloc **cs_reloc)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc; struct radeon_cs_packet p3reloc;
unsigned idx; unsigned idx;
@ -1044,7 +1042,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
return -EINVAL; return -EINVAL;
} }
*cs_reloc = NULL; *cs_reloc = NULL;
ib_chunk = &p->chunks[p->chunk_ib_idx];
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r100_cs_packet_parse(p, &p3reloc, p->idx); r = r100_cs_packet_parse(p, &p3reloc, p->idx);
if (r) { if (r) {
@ -1057,7 +1054,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, &p3reloc); r100_cs_dump_packet(p, &p3reloc);
return -EINVAL; return -EINVAL;
} }
idx = ib_chunk->kdata[p3reloc.idx + 1]; idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) { if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw); idx, relocs_chunk->length_dw);
@ -1126,7 +1123,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg) unsigned idx, unsigned reg)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct r100_cs_track *track; struct r100_cs_track *track;
volatile uint32_t *ib; volatile uint32_t *ib;
@ -1134,11 +1130,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r; int r;
int i, face; int i, face;
u32 tile_flags = 0; u32 tile_flags = 0;
u32 idx_value;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track; track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) { switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE: case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p); r = r100_cs_packet_parse_vline(p);
@ -1166,8 +1164,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx]; track->zb.offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case RADEON_RB3D_COLOROFFSET: case RADEON_RB3D_COLOROFFSET:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -1178,8 +1176,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->cb[0].robj = reloc->robj; track->cb[0].robj = reloc->robj;
track->cb[0].offset = ib_chunk->kdata[idx]; track->cb[0].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case RADEON_PP_TXOFFSET_0: case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1: case RADEON_PP_TXOFFSET_1:
@ -1192,7 +1190,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
break; break;
case RADEON_PP_CUBIC_OFFSET_T0_0: case RADEON_PP_CUBIC_OFFSET_T0_0:
@ -1208,8 +1206,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj; track->textures[0].cube_info[i].robj = reloc->robj;
break; break;
case RADEON_PP_CUBIC_OFFSET_T1_0: case RADEON_PP_CUBIC_OFFSET_T1_0:
@ -1225,8 +1223,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj; track->textures[1].cube_info[i].robj = reloc->robj;
break; break;
case RADEON_PP_CUBIC_OFFSET_T2_0: case RADEON_PP_CUBIC_OFFSET_T2_0:
@ -1242,12 +1240,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj; track->textures[2].cube_info[i].robj = reloc->robj;
break; break;
case RADEON_RE_WIDTH_HEIGHT: case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); track->maxy = ((idx_value >> 16) & 0x7FF);
break; break;
case RADEON_RB3D_COLORPITCH: case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -1263,17 +1261,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp; ib[idx] = tmp;
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
break; break;
case RADEON_RB3D_DEPTHPITCH: case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
break; break;
case RADEON_RB3D_CNTL: case RADEON_RB3D_CNTL:
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
case 7: case 7:
case 8: case 8:
case 9: case 9:
@ -1291,13 +1289,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break; break;
default: default:
DRM_ERROR("Invalid color buffer format (%d) !\n", DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL; return -EINVAL;
} }
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
break; break;
case RADEON_RB3D_ZSTENCILCNTL: case RADEON_RB3D_ZSTENCILCNTL:
switch (ib_chunk->kdata[idx] & 0xf) { switch (idx_value & 0xf) {
case 0: case 0:
track->zb.cpp = 2; track->zb.cpp = 2;
break; break;
@ -1321,44 +1319,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case RADEON_PP_CNTL: case RADEON_PP_CNTL:
{ {
uint32_t temp = ib_chunk->kdata[idx] >> 4; uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++) for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i)); track->textures[i].enabled = !!(temp & (1 << i));
} }
break; break;
case RADEON_SE_VF_CNTL: case RADEON_SE_VF_CNTL:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = idx_value;
break; break;
case RADEON_SE_VTX_FMT: case RADEON_SE_VTX_FMT:
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); track->vtx_size = r100_get_vtx_size(idx_value);
break; break;
case RADEON_PP_TEX_SIZE_0: case RADEON_PP_TEX_SIZE_0:
case RADEON_PP_TEX_SIZE_1: case RADEON_PP_TEX_SIZE_1:
case RADEON_PP_TEX_SIZE_2: case RADEON_PP_TEX_SIZE_2:
i = (reg - RADEON_PP_TEX_SIZE_0) / 8; i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
break; break;
case RADEON_PP_TEX_PITCH_0: case RADEON_PP_TEX_PITCH_0:
case RADEON_PP_TEX_PITCH_1: case RADEON_PP_TEX_PITCH_1:
case RADEON_PP_TEX_PITCH_2: case RADEON_PP_TEX_PITCH_2:
i = (reg - RADEON_PP_TEX_PITCH_0) / 8; i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
track->textures[i].pitch = ib_chunk->kdata[idx] + 32; track->textures[i].pitch = idx_value + 32;
break; break;
case RADEON_PP_TXFILTER_0: case RADEON_PP_TXFILTER_0:
case RADEON_PP_TXFILTER_1: case RADEON_PP_TXFILTER_1:
case RADEON_PP_TXFILTER_2: case RADEON_PP_TXFILTER_2:
i = (reg - RADEON_PP_TXFILTER_0) / 24; i = (reg - RADEON_PP_TXFILTER_0) / 24;
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
>> RADEON_MAX_MIP_LEVEL_SHIFT); >> RADEON_MAX_MIP_LEVEL_SHIFT);
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; tmp = (idx_value >> 23) & 0x7;
if (tmp == 2 || tmp == 6) if (tmp == 2 || tmp == 6)
track->textures[i].roundup_w = false; track->textures[i].roundup_w = false;
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6) if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false; track->textures[i].roundup_h = false;
break; break;
@ -1366,16 +1364,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXFORMAT_1: case RADEON_PP_TXFORMAT_1:
case RADEON_PP_TXFORMAT_2: case RADEON_PP_TXFORMAT_2:
i = (reg - RADEON_PP_TXFORMAT_0) / 24; i = (reg - RADEON_PP_TXFORMAT_0) / 24;
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
track->textures[i].use_pitch = 1; track->textures[i].use_pitch = 1;
} else { } else {
track->textures[i].use_pitch = 0; track->textures[i].use_pitch = 0;
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
} }
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
track->textures[i].tex_coord_type = 2; track->textures[i].tex_coord_type = 2;
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case RADEON_TXFORMAT_I8: case RADEON_TXFORMAT_I8:
case RADEON_TXFORMAT_RGB332: case RADEON_TXFORMAT_RGB332:
case RADEON_TXFORMAT_Y8: case RADEON_TXFORMAT_Y8:
@ -1402,13 +1400,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cpp = 4; track->textures[i].cpp = 4;
break; break;
} }
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
break; break;
case RADEON_PP_CUBIC_FACES_0: case RADEON_PP_CUBIC_FACES_0:
case RADEON_PP_CUBIC_FACES_1: case RADEON_PP_CUBIC_FACES_1:
case RADEON_PP_CUBIC_FACES_2: case RADEON_PP_CUBIC_FACES_2:
tmp = ib_chunk->kdata[idx]; tmp = idx_value;
i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
for (face = 0; face < 4; face++) { for (face = 0; face < 4; face++) {
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@ -1427,15 +1425,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
struct radeon_object *robj) struct radeon_object *robj)
{ {
struct radeon_cs_chunk *ib_chunk;
unsigned idx; unsigned idx;
u32 value;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1; idx = pkt->idx + 1;
if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_object_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n", "(need %u have %lu) !\n",
ib_chunk->kdata[idx+2] + 1, value + 1,
radeon_object_size(robj)); radeon_object_size(robj));
return -EINVAL; return -EINVAL;
} }
@ -1445,59 +1442,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p, static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt) struct radeon_cs_packet *pkt)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct r100_cs_track *track; struct r100_cs_track *track;
unsigned idx; unsigned idx;
unsigned i, c;
volatile uint32_t *ib; volatile uint32_t *ib;
int r; int r;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1; idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track; track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) { switch (pkt->opcode) {
case PACKET3_3D_LOAD_VBPNTR: case PACKET3_3D_LOAD_VBPNTR:
c = ib_chunk->kdata[idx++]; r = r100_packet3_load_vbpntr(p, pkt, idx);
track->num_arrays = c; if (r)
for (i = 0; i < (c - 1); i += 2, idx += 3) { return r;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
break; break;
case PACKET3_INDX_BUFFER: case PACKET3_INDX_BUFFER:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -1506,7 +1464,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) { if (r) {
return r; return r;
@ -1520,27 +1478,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
track->num_arrays = 1; track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
track->arrays[0].robj = reloc->robj; track->arrays[0].robj = reloc->robj;
track->arrays[0].esize = track->vtx_size; track->arrays[0].esize = track->vtx_size;
track->max_indx = ib_chunk->kdata[idx+1]; track->max_indx = radeon_get_ib_value(p, idx+1);
track->vap_vf_cntl = ib_chunk->kdata[idx+3]; track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
track->immd_dwords = pkt->count - 1; track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
return r; return r;
break; break;
case PACKET3_3D_DRAW_IMMD: case PACKET3_3D_DRAW_IMMD:
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL; return -EINVAL;
} }
track->vap_vf_cntl = ib_chunk->kdata[idx+1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1; track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
@ -1548,11 +1506,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break; break;
/* triggers drawing using in-packet vertex data */ /* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_IMMD_2: case PACKET3_3D_DRAW_IMMD_2:
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL; return -EINVAL;
} }
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
track->immd_dwords = pkt->count; track->immd_dwords = pkt->count;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
@ -1560,28 +1518,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break; break;
/* triggers drawing using in-packet vertex data */ /* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_VBUF_2: case PACKET3_3D_DRAW_VBUF_2:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
return r; return r;
break; break;
/* triggers drawing of vertex buffers setup elsewhere */ /* triggers drawing of vertex buffers setup elsewhere */
case PACKET3_3D_DRAW_INDX_2: case PACKET3_3D_DRAW_INDX_2:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
return r; return r;
break; break;
/* triggers drawing using indices to vertex buffer */ /* triggers drawing using indices to vertex buffer */
case PACKET3_3D_DRAW_VBUF: case PACKET3_3D_DRAW_VBUF:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
return r; return r;
break; break;
/* triggers drawing of vertex buffers setup elsewhere */ /* triggers drawing of vertex buffers setup elsewhere */
case PACKET3_3D_DRAW_INDX: case PACKET3_3D_DRAW_INDX:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) if (r)
return r; return r;

View file

@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg); unsigned idx, unsigned reg);
static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx, unsigned idx,
@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
u32 tile_flags = 0; u32 tile_flags = 0;
u32 tmp; u32 tmp;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct radeon_cs_chunk *ib_chunk; u32 value;
ib_chunk = &p->chunks[p->chunk_ib_idx];
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
if (r) { if (r) {
@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
tmp = ib_chunk->kdata[idx] & 0x003fffff; value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10); tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
} }
tmp |= tile_flags; tmp |= tile_flags;
p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
return 0; return 0;
} }
static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int idx)
{
unsigned c, i;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
u32 idx_value;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
return r;
}

View file

@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg) unsigned idx, unsigned reg)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct r100_cs_track *track; struct r100_cs_track *track;
volatile uint32_t *ib; volatile uint32_t *ib;
@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
int i; int i;
int face; int face;
u32 tile_flags = 0; u32 tile_flags = 0;
u32 idx_value;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track; track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) { switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE: case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p); r = r100_cs_packet_parse_vline(p);
@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx]; track->zb.offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case RADEON_RB3D_COLOROFFSET: case RADEON_RB3D_COLOROFFSET:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->cb[0].robj = reloc->robj; track->cb[0].robj = reloc->robj;
track->cb[0].offset = ib_chunk->kdata[idx]; track->cb[0].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case R200_PP_TXOFFSET_0: case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1: case R200_PP_TXOFFSET_1:
@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
break; break;
case R200_PP_CUBIC_OFFSET_F1_0: case R200_PP_CUBIC_OFFSET_F1_0:
@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj; track->textures[i].cube_info[face - 1].robj = reloc->robj;
break; break;
case RADEON_RE_WIDTH_HEIGHT: case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); track->maxy = ((idx_value >> 16) & 0x7FF);
break; break;
case RADEON_RB3D_COLORPITCH: case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp; ib[idx] = tmp;
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
break; break;
case RADEON_RB3D_DEPTHPITCH: case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
break; break;
case RADEON_RB3D_CNTL: case RADEON_RB3D_CNTL:
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
case 7: case 7:
case 8: case 8:
case 9: case 9:
@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
break; break;
default: default:
DRM_ERROR("Invalid color buffer format (%d) !\n", DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL; return -EINVAL;
} }
if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
DRM_ERROR("No support for depth xy offset in kms\n"); DRM_ERROR("No support for depth xy offset in kms\n");
return -EINVAL; return -EINVAL;
} }
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
break; break;
case RADEON_RB3D_ZSTENCILCNTL: case RADEON_RB3D_ZSTENCILCNTL:
switch (ib_chunk->kdata[idx] & 0xf) { switch (idx_value & 0xf) {
case 0: case 0:
track->zb.cpp = 2; track->zb.cpp = 2;
break; break;
@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case RADEON_PP_CNTL: case RADEON_PP_CNTL:
{ {
uint32_t temp = ib_chunk->kdata[idx] >> 4; uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++) for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i)); track->textures[i].enabled = !!(temp & (1 << i));
} }
break; break;
case RADEON_SE_VF_CNTL: case RADEON_SE_VF_CNTL:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = idx_value;
break; break;
case 0x210c: case 0x210c:
/* VAP_VF_MAX_VTX_INDX */ /* VAP_VF_MAX_VTX_INDX */
track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; track->max_indx = idx_value & 0x00FFFFFFUL;
break; break;
case R200_SE_VTX_FMT_0: case R200_SE_VTX_FMT_0:
track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); track->vtx_size = r200_get_vtx_size_0(idx_value);
break; break;
case R200_SE_VTX_FMT_1: case R200_SE_VTX_FMT_1:
track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); track->vtx_size += r200_get_vtx_size_1(idx_value);
break; break;
case R200_PP_TXSIZE_0: case R200_PP_TXSIZE_0:
case R200_PP_TXSIZE_1: case R200_PP_TXSIZE_1:
@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXSIZE_4: case R200_PP_TXSIZE_4:
case R200_PP_TXSIZE_5: case R200_PP_TXSIZE_5:
i = (reg - R200_PP_TXSIZE_0) / 32; i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
break; break;
case R200_PP_TXPITCH_0: case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1: case R200_PP_TXPITCH_1:
@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXPITCH_4: case R200_PP_TXPITCH_4:
case R200_PP_TXPITCH_5: case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32; i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = ib_chunk->kdata[idx] + 32; track->textures[i].pitch = idx_value + 32;
break; break;
case R200_PP_TXFILTER_0: case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1: case R200_PP_TXFILTER_1:
@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFILTER_4: case R200_PP_TXFILTER_4:
case R200_PP_TXFILTER_5: case R200_PP_TXFILTER_5:
i = (reg - R200_PP_TXFILTER_0) / 32; i = (reg - R200_PP_TXFILTER_0) / 32;
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
>> R200_MAX_MIP_LEVEL_SHIFT); >> R200_MAX_MIP_LEVEL_SHIFT);
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; tmp = (idx_value >> 23) & 0x7;
if (tmp == 2 || tmp == 6) if (tmp == 2 || tmp == 6)
track->textures[i].roundup_w = false; track->textures[i].roundup_w = false;
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6) if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false; track->textures[i].roundup_h = false;
break; break;
@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFORMAT_X_4: case R200_PP_TXFORMAT_X_4:
case R200_PP_TXFORMAT_X_5: case R200_PP_TXFORMAT_X_5:
i = (reg - R200_PP_TXFORMAT_X_0) / 32; i = (reg - R200_PP_TXFORMAT_X_0) / 32;
track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; track->textures[i].txdepth = idx_value & 0x7;
tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; tmp = (idx_value >> 16) & 0x3;
/* 2D, 3D, CUBE */ /* 2D, 3D, CUBE */
switch (tmp) { switch (tmp) {
case 0: case 0:
@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFORMAT_4: case R200_PP_TXFORMAT_4:
case R200_PP_TXFORMAT_5: case R200_PP_TXFORMAT_5:
i = (reg - R200_PP_TXFORMAT_0) / 32; i = (reg - R200_PP_TXFORMAT_0) / 32;
if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { if (idx_value & R200_TXFORMAT_NON_POWER2) {
track->textures[i].use_pitch = 1; track->textures[i].use_pitch = 1;
} else { } else {
track->textures[i].use_pitch = 0; track->textures[i].use_pitch = 0;
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
} }
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8: case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332: case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8: case R200_TXFORMAT_Y8:
@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cpp = 4; track->textures[i].cpp = 4;
break; break;
} }
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
break; break;
case R200_PP_CUBIC_FACES_0: case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1: case R200_PP_CUBIC_FACES_1:
@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_FACES_3: case R200_PP_CUBIC_FACES_3:
case R200_PP_CUBIC_FACES_4: case R200_PP_CUBIC_FACES_4:
case R200_PP_CUBIC_FACES_5: case R200_PP_CUBIC_FACES_5:
tmp = ib_chunk->kdata[idx]; tmp = idx_value;
i = (reg - R200_PP_CUBIC_FACES_0) / 32; i = (reg - R200_PP_CUBIC_FACES_0) / 32;
for (face = 0; face < 4; face++) { for (face = 0; face < 4; face++) {
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);

View file

@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt, struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg) unsigned idx, unsigned reg)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct r100_cs_track *track; struct r100_cs_track *track;
volatile uint32_t *ib; volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0; uint32_t tmp, tile_flags = 0;
unsigned i; unsigned i;
int r; int r;
u32 idx_value;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track; track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch(reg) { switch(reg) {
case AVIVO_D1MODE_VLINE_START_END: case AVIVO_D1MODE_VLINE_START_END:
case RADEON_CRTC_GUI_TRIG_VLINE: case RADEON_CRTC_GUI_TRIG_VLINE:
@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->cb[i].robj = reloc->robj; track->cb[i].robj = reloc->robj;
track->cb[i].offset = ib_chunk->kdata[idx]; track->cb[i].offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case R300_ZB_DEPTHOFFSET: case R300_ZB_DEPTHOFFSET:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx]; track->zb.offset = idx_value;
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case R300_TX_OFFSET_0: case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4: case R300_TX_OFFSET_0+4:
@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
break; break;
/* Tracked registers */ /* Tracked registers */
case 0x2084: case 0x2084:
/* VAP_VF_CNTL */ /* VAP_VF_CNTL */
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = idx_value;
break; break;
case 0x20B4: case 0x20B4:
/* VAP_VTX_SIZE */ /* VAP_VTX_SIZE */
track->vtx_size = ib_chunk->kdata[idx] & 0x7F; track->vtx_size = idx_value & 0x7F;
break; break;
case 0x2134: case 0x2134:
/* VAP_VF_MAX_VTX_INDX */ /* VAP_VF_MAX_VTX_INDX */
track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; track->max_indx = idx_value & 0x00FFFFFFUL;
break; break;
case 0x43E4: case 0x43E4:
/* SC_SCISSOR1 */ /* SC_SCISSOR1 */
track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
if (p->rdev->family < CHIP_RV515) { if (p->rdev->family < CHIP_RV515) {
track->maxy -= 1440; track->maxy -= 1440;
} }
break; break;
case 0x4E00: case 0x4E00:
/* RB3D_CCTL */ /* RB3D_CCTL */
track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; track->num_cb = ((idx_value >> 5) & 0x3) + 1;
break; break;
case 0x4E38: case 0x4E38:
case 0x4E3C: case 0x4E3C:
@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE; tile_flags |= R300_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp; ib[idx] = tmp;
i = (reg - 0x4E38) >> 2; i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; track->cb[i].pitch = idx_value & 0x3FFE;
switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { switch (((idx_value >> 21) & 0xF)) {
case 9: case 9:
case 11: case 11:
case 12: case 12:
@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break; break;
default: default:
DRM_ERROR("Invalid color buffer format (%d) !\n", DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> 21) & 0xF)); ((idx_value >> 21) & 0xF));
return -EINVAL; return -EINVAL;
} }
break; break;
case 0x4F00: case 0x4F00:
/* ZB_CNTL */ /* ZB_CNTL */
if (ib_chunk->kdata[idx] & 2) { if (idx_value & 2) {
track->z_enabled = true; track->z_enabled = true;
} else { } else {
track->z_enabled = false; track->z_enabled = false;
@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break; break;
case 0x4F10: case 0x4F10:
/* ZB_FORMAT */ /* ZB_FORMAT */
switch ((ib_chunk->kdata[idx] & 0xF)) { switch ((idx_value & 0xF)) {
case 0: case 0:
case 1: case 1:
track->zb.cpp = 2; track->zb.cpp = 2;
@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break; break;
default: default:
DRM_ERROR("Invalid z buffer format (%d) !\n", DRM_ERROR("Invalid z buffer format (%d) !\n",
(ib_chunk->kdata[idx] & 0xF)); (idx_value & 0xF));
return -EINVAL; return -EINVAL;
} }
break; break;
@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;; tile_flags |= R300_DEPTHMICROTILE_TILED;;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp; ib[idx] = tmp;
track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; track->zb.pitch = idx_value & 0x3FFC;
break; break;
case 0x4104: case 0x4104:
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
bool enabled; bool enabled;
enabled = !!(ib_chunk->kdata[idx] & (1 << i)); enabled = !!(idx_value & (1 << i));
track->textures[i].enabled = enabled; track->textures[i].enabled = enabled;
} }
break; break;
@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x44FC: case 0x44FC:
/* TX_FORMAT1_[0-15] */ /* TX_FORMAT1_[0-15] */
i = (reg - 0x44C0) >> 2; i = (reg - 0x44C0) >> 2;
tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; tmp = (idx_value >> 25) & 0x3;
track->textures[i].tex_coord_type = tmp; track->textures[i].tex_coord_type = tmp;
switch ((ib_chunk->kdata[idx] & 0x1F)) { switch ((idx_value & 0x1F)) {
case R300_TX_FORMAT_X8: case R300_TX_FORMAT_X8:
case R300_TX_FORMAT_Y4X4: case R300_TX_FORMAT_Y4X4:
case R300_TX_FORMAT_Z3Y3X2: case R300_TX_FORMAT_Z3Y3X2:
@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break; break;
default: default:
DRM_ERROR("Invalid texture format %u\n", DRM_ERROR("Invalid texture format %u\n",
(ib_chunk->kdata[idx] & 0x1F)); (idx_value & 0x1F));
return -EINVAL; return -EINVAL;
break; break;
} }
@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x443C: case 0x443C:
/* TX_FILTER0_[0-15] */ /* TX_FILTER0_[0-15] */
i = (reg - 0x4400) >> 2; i = (reg - 0x4400) >> 2;
tmp = ib_chunk->kdata[idx] & 0x7; tmp = idx_value & 0x7;
if (tmp == 2 || tmp == 4 || tmp == 6) { if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_w = false; track->textures[i].roundup_w = false;
} }
tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; tmp = (idx_value >> 3) & 0x7;
if (tmp == 2 || tmp == 4 || tmp == 6) { if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_h = false; track->textures[i].roundup_h = false;
} }
@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x453C: case 0x453C:
/* TX_FORMAT2_[0-15] */ /* TX_FORMAT2_[0-15] */
i = (reg - 0x4500) >> 2; i = (reg - 0x4500) >> 2;
tmp = ib_chunk->kdata[idx] & 0x3FFF; tmp = idx_value & 0x3FFF;
track->textures[i].pitch = tmp + 1; track->textures[i].pitch = tmp + 1;
if (p->rdev->family >= CHIP_RV515) { if (p->rdev->family >= CHIP_RV515) {
tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; tmp = ((idx_value >> 15) & 1) << 11;
track->textures[i].width_11 = tmp; track->textures[i].width_11 = tmp;
tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp; track->textures[i].height_11 = tmp;
} }
break; break;
@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x44BC: case 0x44BC:
/* TX_FORMAT0_[0-15] */ /* TX_FORMAT0_[0-15] */
i = (reg - 0x4480) >> 2; i = (reg - 0x4480) >> 2;
tmp = ib_chunk->kdata[idx] & 0x7FF; tmp = idx_value & 0x7FF;
track->textures[i].width = tmp + 1; track->textures[i].width = tmp + 1;
tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; tmp = (idx_value >> 11) & 0x7FF;
track->textures[i].height = tmp + 1; track->textures[i].height = tmp + 1;
tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; tmp = (idx_value >> 26) & 0xF;
track->textures[i].num_levels = tmp; track->textures[i].num_levels = tmp;
tmp = ib_chunk->kdata[idx] & (1 << 31); tmp = idx_value & (1 << 31);
track->textures[i].use_pitch = !!tmp; track->textures[i].use_pitch = !!tmp;
tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; tmp = (idx_value >> 22) & 0xF;
track->textures[i].txdepth = tmp; track->textures[i].txdepth = tmp;
break; break;
case R300_ZB_ZPASS_ADDR: case R300_ZB_ZPASS_ADDR:
@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break; break;
case 0x4be8: case 0x4be8:
/* valid register only on RV530 */ /* valid register only on RV530 */
@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
static int r300_packet3_check(struct radeon_cs_parser *p, static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt) struct radeon_cs_packet *pkt)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
struct r100_cs_track *track; struct r100_cs_track *track;
volatile uint32_t *ib; volatile uint32_t *ib;
unsigned idx; unsigned idx;
unsigned i, c;
int r; int r;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1; idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track; track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) { switch(pkt->opcode) {
case PACKET3_3D_LOAD_VBPNTR: case PACKET3_3D_LOAD_VBPNTR:
c = ib_chunk->kdata[idx++] & 0x1F; r = r100_packet3_load_vbpntr(p, pkt, idx);
track->num_arrays = c; if (r)
for (i = 0; i < (c - 1); i+=2, idx+=3) { return r;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
break; break;
case PACKET3_INDX_BUFFER: case PACKET3_INDX_BUFFER:
r = r100_cs_packet_next_reloc(p, &reloc); r = r100_cs_packet_next_reloc(p, &reloc);
@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt); r100_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) { if (r) {
return r; return r;
@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
/* Number of dwords is vtx_size * (num_vertices - 1) /* Number of dwords is vtx_size * (num_vertices - 1)
* PRIM_WALK must be equal to 3 vertex data in embedded * PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */ * in cmd stream */
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL; return -EINVAL;
} }
track->vap_vf_cntl = ib_chunk->kdata[idx+1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1; track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
/* Number of dwords is vtx_size * (num_vertices - 1) /* Number of dwords is vtx_size * (num_vertices - 1)
* PRIM_WALK must be equal to 3 vertex data in embedded * PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */ * in cmd stream */
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL; return -EINVAL;
} }
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
track->immd_dwords = pkt->count; track->immd_dwords = pkt->count;
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
} }
break; break;
case PACKET3_3D_DRAW_VBUF: case PACKET3_3D_DRAW_VBUF:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
return r; return r;
} }
break; break;
case PACKET3_3D_DRAW_VBUF_2: case PACKET3_3D_DRAW_VBUF_2:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
return r; return r;
} }
break; break;
case PACKET3_3D_DRAW_INDX: case PACKET3_3D_DRAW_INDX:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
return r; return r;
} }
break; break;
case PACKET3_3D_DRAW_INDX_2: case PACKET3_3D_DRAW_INDX_2:
track->vap_vf_cntl = ib_chunk->kdata[idx]; track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); r = r100_cs_track_check(p->rdev, track);
if (r) { if (r) {
return r; return r;

View file

@ -57,7 +57,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
idx, ib_chunk->length_dw); idx, ib_chunk->length_dw);
return -EINVAL; return -EINVAL;
} }
header = ib_chunk->kdata[idx]; header = radeon_get_ib_value(p, idx);
pkt->idx = idx; pkt->idx = idx;
pkt->type = CP_PACKET_GET_TYPE(header); pkt->type = CP_PACKET_GET_TYPE(header);
pkt->count = CP_PACKET_GET_COUNT(header); pkt->count = CP_PACKET_GET_COUNT(header);
@ -98,7 +98,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc) struct radeon_cs_reloc **cs_reloc)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc; struct radeon_cs_packet p3reloc;
unsigned idx; unsigned idx;
@ -109,7 +108,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
return -EINVAL; return -EINVAL;
} }
*cs_reloc = NULL; *cs_reloc = NULL;
ib_chunk = &p->chunks[p->chunk_ib_idx];
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r600_cs_packet_parse(p, &p3reloc, p->idx); r = r600_cs_packet_parse(p, &p3reloc, p->idx);
if (r) { if (r) {
@ -121,7 +119,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
p3reloc.idx); p3reloc.idx);
return -EINVAL; return -EINVAL;
} }
idx = ib_chunk->kdata[p3reloc.idx + 1]; idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) { if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw); idx, relocs_chunk->length_dw);
@ -146,7 +144,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc) struct radeon_cs_reloc **cs_reloc)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc; struct radeon_cs_packet p3reloc;
unsigned idx; unsigned idx;
@ -157,7 +154,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
return -EINVAL; return -EINVAL;
} }
*cs_reloc = NULL; *cs_reloc = NULL;
ib_chunk = &p->chunks[p->chunk_ib_idx];
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r600_cs_packet_parse(p, &p3reloc, p->idx); r = r600_cs_packet_parse(p, &p3reloc, p->idx);
if (r) { if (r) {
@ -169,7 +165,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
p3reloc.idx); p3reloc.idx);
return -EINVAL; return -EINVAL;
} }
idx = ib_chunk->kdata[p3reloc.idx + 1]; idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) { if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw); idx, relocs_chunk->length_dw);
@ -218,7 +214,6 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
static int r600_packet3_check(struct radeon_cs_parser *p, static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt) struct radeon_cs_packet *pkt)
{ {
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc; struct radeon_cs_reloc *reloc;
volatile u32 *ib; volatile u32 *ib;
unsigned idx; unsigned idx;
@ -227,8 +222,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r; int r;
ib = p->ib->ptr; ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1; idx = pkt->idx + 1;
switch (pkt->opcode) { switch (pkt->opcode) {
case PACKET3_START_3D_CMDBUF: case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) { if (p->family >= CHIP_RV770 || pkt->count) {
@ -281,7 +276,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL; return -EINVAL;
} }
/* bit 4 is reg (0) or mem (1) */ /* bit 4 is reg (0) or mem (1) */
if (ib_chunk->kdata[idx+0] & 0x10) { if (radeon_get_ib_value(p, idx) & 0x10) {
r = r600_cs_packet_next_reloc(p, &reloc); r = r600_cs_packet_next_reloc(p, &reloc);
if (r) { if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n"); DRM_ERROR("bad WAIT_REG_MEM\n");
@ -297,8 +292,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL; return -EINVAL;
} }
/* 0xffffffff/0x0 is flush all cache flag */ /* 0xffffffff/0x0 is flush all cache flag */
if (ib_chunk->kdata[idx+1] != 0xffffffff || if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
ib_chunk->kdata[idx+2] != 0) { radeon_get_ib_value(p, idx + 2) != 0) {
r = r600_cs_packet_next_reloc(p, &reloc); r = r600_cs_packet_next_reloc(p, &reloc);
if (r) { if (r) {
DRM_ERROR("bad SURFACE_SYNC\n"); DRM_ERROR("bad SURFACE_SYNC\n");
@ -639,7 +634,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
* uncached). */ * uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx]; ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib->length_dw = ib_chunk->length_dw; parser.ib->length_dw = ib_chunk->length_dw;
memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
*l = parser.ib->length_dw; *l = parser.ib->length_dw;
r = r600_cs_parse(&parser); r = r600_cs_parse(&parser);
if (r) { if (r) {
@ -647,6 +641,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
r600_cs_parser_fini(&parser, r); r600_cs_parser_fini(&parser, r);
return r; return r;
} }
r = radeon_cs_finish_pages(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
return r;
}
r600_cs_parser_fini(&parser, r); r600_cs_parser_fini(&parser, r);
return r; return r;
} }

View file

@ -342,7 +342,7 @@ struct radeon_ib {
unsigned long idx; unsigned long idx;
uint64_t gpu_addr; uint64_t gpu_addr;
struct radeon_fence *fence; struct radeon_fence *fence;
volatile uint32_t *ptr; uint32_t *ptr;
uint32_t length_dw; uint32_t length_dw;
}; };
@ -415,7 +415,12 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk { struct radeon_cs_chunk {
uint32_t chunk_id; uint32_t chunk_id;
uint32_t length_dw; uint32_t length_dw;
int kpage_idx[2];
uint32_t *kpage[2];
uint32_t *kdata; uint32_t *kdata;
void __user *user_ptr;
int last_copied_page;
int last_page_index;
}; };
struct radeon_cs_parser { struct radeon_cs_parser {
@ -438,8 +443,38 @@ struct radeon_cs_parser {
struct radeon_ib *ib; struct radeon_ib *ib;
void *track; void *track;
unsigned family; unsigned family;
int parser_error;
}; };
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
u32 pg_idx, pg_offset;
u32 idx_value = 0;
int new_page;
pg_idx = (idx * 4) / PAGE_SIZE;
pg_offset = (idx * 4) % PAGE_SIZE;
if (ibc->kpage_idx[0] == pg_idx)
return ibc->kpage[0][pg_offset/4];
if (ibc->kpage_idx[1] == pg_idx)
return ibc->kpage[1][pg_offset/4];
new_page = radeon_cs_update_pages(p, pg_idx);
if (new_page < 0) {
p->parser_error = new_page;
return 0;
}
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
struct radeon_cs_packet { struct radeon_cs_packet {
unsigned idx; unsigned idx;
unsigned type; unsigned type;

View file

@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
} }
p->chunks[i].length_dw = user_chunk.length_dw; p->chunks[i].length_dw = user_chunk.length_dw;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
size = p->chunks[i].length_dw * sizeof(uint32_t); cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
if (p->chunks[i].kdata == NULL) { size = p->chunks[i].length_dw * sizeof(uint32_t);
return -ENOMEM; p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
} if (p->chunks[i].kdata == NULL) {
if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { return -ENOMEM;
return -EFAULT; }
if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
} else {
p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
kfree(p->chunks[i].kpage[0]);
kfree(p->chunks[i].kpage[1]);
return -ENOMEM;
}
p->chunks[i].kpage_idx[0] = -1;
p->chunks[i].kpage_idx[1] = -1;
p->chunks[i].last_copied_page = -1;
p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
} }
} }
if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs_ptr); kfree(parser->relocs_ptr);
for (i = 0; i < parser->nchunks; i++) { for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata); kfree(parser->chunks[i].kdata);
kfree(parser->chunks[i].kpage[0]);
kfree(parser->chunks[i].kpage[1]);
} }
kfree(parser->chunks); kfree(parser->chunks);
kfree(parser->chunks_array); kfree(parser->chunks_array);
@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* uncached). */ * uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx]; ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib->length_dw = ib_chunk->length_dw; parser.ib->length_dw = ib_chunk->length_dw;
memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
r = radeon_cs_parse(&parser); r = radeon_cs_parse(&parser);
if (r || parser.parser_error) {
DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_finish_pages(&parser);
if (r) { if (r) {
DRM_ERROR("Invalid command stream !\n"); DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r); radeon_cs_parser_fini(&parser, r);
@ -254,3 +278,66 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mutex_unlock(&rdev->cs_mutex); mutex_unlock(&rdev->cs_mutex);
return r; return r;
} }
/* Copy every IB page the parser never pulled through the two-page cache
 * straight from user space into the indirect buffer, so the whole chunk
 * is present before the IB is scheduled.
 *
 * Returns 0 on success, -EFAULT if a copy from user space fails.
 */
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int page;

	for (page = ibc->last_copied_page + 1; page <= ibc->last_page_index; page++) {
		/* every page is full except possibly the final one */
		int bytes = PAGE_SIZE;

		if (page == ibc->last_page_index &&
		    (ibc->length_dw * 4) % PAGE_SIZE)
			bytes = (ibc->length_dw * 4) % PAGE_SIZE;

		if (DRM_COPY_FROM_USER(p->ib->ptr + (page * (PAGE_SIZE / 4)),
				       ibc->user_ptr + (page * PAGE_SIZE),
				       bytes))
			return -EFAULT;
	}
	return 0;
}
/* Bring page @pg_idx of the IB chunk into one of the two kmalloc'd cache
 * pages, copying any pages skipped over since the last cached page
 * directly into the indirect buffer.
 *
 * Returns the kpage slot index (0 or 1) now holding @pg_idx.  On a failed
 * copy from user space, p->parser_error is set to -EFAULT and 0 is
 * returned (callers must check parser_error, not the return value).
 */
int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int new_page;
	int i;
	int size = PAGE_SIZE;

	/* Pages between the last cached page and pg_idx will never come
	 * through the cache again, so copy them straight into the IB.
	 * The loop must cover up to and including page pg_idx - 1
	 * (i < pg_idx); the previous i < last_copied_page + num_extra_pages
	 * bound stopped one page short and lost page pg_idx - 1 entirely,
	 * since last_copied_page is advanced to pg_idx below. */
	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	/* evict the older of the two cached pages */
	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

	/* the final page of the chunk may be partially filled */
	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* keep the IB in sync with the freshly cached page */
	memcpy((void *)(p->ib->ptr + (pg_idx * (PAGE_SIZE/4))),
	       ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}