Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm radeon fixes from Dave Airlie:
 "Just radeon fixes in this one:
   - some new PCI IDs
   - ATPX regression fix
   - async VM regression fixes
   - some module options fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: fix ATPX regression in acpi rework
  drm/radeon: fix ATPX function documentation
  drm/radeon: move the retry to gem_object_create
  drm/radeon: move size limits to gem_object_create.
  drm/radeon: use vzalloc for gart pages
  drm/radeon: fix and simplify pot argument checks v3
  drm/radeon: fix header size estimation in VM code
  drm/radeon: remove set_page check from VM code
  drm/radeon: fix si_set_page v2
  drm/radeon: fix cayman_vm_set_page v2
  drm/radeon: fix PFP sync in vm_flush
  drm/radeon: add error output if VM CS fails on cayman
  drm/radeon: give each backlight a unique id
  drm/radeon: fix sparse warning
  drm/radeon: add some new SI PCI ids
Linus Torvalds 2012-10-25 19:26:54 -07:00
commit b394209ce5
12 changed files with 121 additions and 109 deletions


@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	struct radeon_backlight_privdata *pdata;
 	struct radeon_encoder_atom_dig *dig;
 	u8 backlight_level;
+	char bl_name[16];
 
 	if (!radeon_encoder->enc_priv)
 		return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	memset(&props, 0, sizeof(props));
 	props.max_brightness = RADEON_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
-	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
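The only functional change here is the name: with two radeon GPUs, both encoders used to register a backlight device under the same "radeon_bl" name, which cannot coexist in sysfs, so the second registration presumably failed. A tiny userspace sketch of the new naming, purely illustrative and not kernel code (dev->primary->index is, as I read it, the card's DRM minor index):

#include <stdio.h>

int main(void)
{
	char bl_name[16];
	int minor;

	for (minor = 0; minor < 2; minor++) {
		snprintf(bl_name, sizeof(bl_name), "radeon_bl%d", minor);
		printf("%s\n", bl_name);	/* radeon_bl0, radeon_bl1 */
	}
	return 0;
}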


@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
 	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
 		return true;
 	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
 		return false;
 	}
 }


@@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	int i;
 
-	radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
-	radeon_ring_write(ring, pe);
-	radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-	for (i = 0; i < count; ++i) {
-		uint64_t value = 0;
-		if (flags & RADEON_VM_PAGE_SYSTEM) {
-			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
-			addr += incr;
+	while (count) {
+		unsigned ndw = 1 + count * 2;
+		if (ndw > 0x3FFF)
+			ndw = 0x3FFF;
 
-		} else if (flags & RADEON_VM_PAGE_VALID) {
-			value = addr;
-			addr += incr;
-		}
+		radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+		radeon_ring_write(ring, pe);
+		radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+		for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+			uint64_t value = 0;
+			if (flags & RADEON_VM_PAGE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+				addr += incr;
 
-		value |= r600_flags;
-		radeon_ring_write(ring, value);
-		radeon_ring_write(ring, upper_32_bits(value));
+			} else if (flags & RADEON_VM_PAGE_VALID) {
+				value = addr;
+				addr += incr;
+			}
+
+			value |= r600_flags;
+			radeon_ring_write(ring, value);
+			radeon_ring_write(ring, upper_32_bits(value));
+		}
 	}
 }
@@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	/* bits 0-7 are the VM contexts0-7 */
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
 }
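What the first hunk fixes, as far as I can tell, is that a single PACKET3_ME_WRITE covering a large page-table update would overflow the packet header's count field (14 bits wide, in my reading of the PACKET3 encoding); the new loop caps each packet at 0x3FFF payload dwords, i.e. one destination-address dword plus two dwords per entry, so at most 8191 entries per packet. The cap value itself is straight from the hunk. A userspace sketch of the chunking arithmetic, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned count = 20000;		/* page-table entries still to write */
	unsigned packets = 0;

	while (count) {
		unsigned ndw = 1 + count * 2;
		if (ndw > 0x3FFF)
			ndw = 0x3FFF;
		count -= (ndw - 1) / 2;	/* entries carried by this packet */
		packets++;
	}
	printf("%u packets\n", packets);	/* 20000 entries -> 3 packets */
	return 0;
}

The second hunk pairs with the PACKET3_PFP_SYNC_ME define added in the next file: per the in-line comment, the prefetch parser is stalled until the ME has processed the VM invalidate, so it cannot read through stale translations.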


@@ -502,6 +502,7 @@
 #define PACKET3_MPEG_INDEX				0x3A
 #define PACKET3_WAIT_REG_MEM				0x3C
 #define PACKET3_MEM_WRITE				0x3D
+#define PACKET3_PFP_SYNC_ME				0x42
 #define PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)


@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
 		atpx_arg_elements[1].integer.value = 0;
 	}
 
-	status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
 
 	/* Fail only if calling the method fails and ATPX is supported */
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 }
 
 /**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
  *
  * @pdev: pci device
  *
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
  * Returns true if the handles are found, false if not.
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
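The first hunk is the actual regression fix: after the ACPI rework the handle passed into radeon_atpx_call() is the handle of the ATPX method itself, so the pathname argument to acpi_evaluate_object() has to be NULL; keeping "ATPX" would ask ACPICA to resolve a child object named ATPX underneath the ATPX method, which does not exist. A kernel-style sketch of the corrected call shape; the function name atpx_call_sketch and its error handling are mine, not the driver's:

/* Assumes 'handle' already refers to the ATPX method, as found by
 * radeon_atpx_pci_probe_handle(); the pathname is therefore NULL. */
#include <linux/acpi.h>
#include <linux/slab.h>

static union acpi_object *atpx_call_sketch(acpi_handle handle,
					   struct acpi_object_list *args)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_evaluate_object(handle, NULL, args, &buffer);
	if (ACPI_FAILURE(status))
		return NULL;

	return buffer.pointer;	/* caller kfree()s the returned object */
}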


@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
  */
 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 {
+	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
 	mc->vram_start = base;
 	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
 		mc->mc_vram_size = mc->aper_size;
 	}
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
-		mc->real_vram_size = radeon_vram_limit;
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
 	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 		 mc->mc_vram_size >> 20, mc->vram_start,
 		 mc->vram_end, mc->real_vram_size >> 20);
@@ -834,6 +836,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+	return (arg & (arg - 1)) == 0;
+}
+
 /**
  * radeon_check_arguments - validate module params
  *
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
 	/* vramlimit must be a power of two */
-	switch (radeon_vram_limit) {
-	case 0:
-	case 4:
-	case 8:
-	case 16:
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 				radeon_vram_limit);
 		radeon_vram_limit = 0;
-		break;
 	}
-	radeon_vram_limit = radeon_vram_limit << 20;
+
 	/* gtt size must be power of two and greater or equal to 32M */
-	switch (radeon_gart_size) {
-	case 4:
-	case 8:
-	case 16:
+	if (radeon_gart_size < 32) {
 		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
 	}
-	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
 	/* AGP mode can only be -1, 1, 2, 4, 8 */
 	switch (radeon_agpmode) {
 	case -1:
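Two separate simplifications land in this file: the switch statements over hard-coded megabyte values collapse into the generic (arg & (arg - 1)) == 0 test (note that 0 passes, matching the old "case 0" meaning "no limit"), and the megabyte-to-byte conversions now cast to 64 bits at the point of use instead of shifting the int module parameter in place, which would overflow at a 4096 MB limit. A small userspace sketch of both; check_pot() stands in for radeon_check_pot_argument() and the values are made up:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool check_pot(int arg)
{
	return (arg & (arg - 1)) == 0;
}

int main(void)
{
	int radeon_vram_limit = 4096;			/* module option, in MB */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;	/* bytes, no overflow */

	printf("pot(0)=%d pot(256)=%d pot(384)=%d\n",
	       check_pot(0), check_pot(256), check_pot(384));	/* 1 1 0 */
	printf("limit = %llu bytes\n", (unsigned long long)limit);
	return 0;
}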


@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
 	/* Allocate pages table */
-	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-				   GFP_KERNEL);
+	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages, GFP_KERNEL);
+	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+					rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages_addr == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
-	kfree(rdev->gart.pages);
-	kfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages);
+	vfree(rdev->gart.pages_addr);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
 
@@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  *
  * Global and local mutex must be locked!
  */
-int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
 
@@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
 		pte += (addr & mask) * 8;
 
-		if (((last_pte + 8 * count) != pte) ||
-		    ((count + nptes) > 1 << 11)) {
+		if ((last_pte + 8 * count) != pte) {
 
 			if (count) {
 				radeon_asic_vm_set_page(rdev, last_pte,
@@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	if (RADEON_VM_BLOCK_SIZE > 11)
 		/* reserve space for one header for every 2k dwords */
-		ndw += (nptes >> 11) * 3;
+		ndw += (nptes >> 11) * 4;
 	else
 		/* reserve space for one header for
 		    every (1 << BLOCK_SIZE) entries */
-		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
 	/* reserve space for pte addresses */
 	ndw += nptes * 2;
 
 	/* reserve space for one header for every 2k dwords */
-	ndw += (npdes >> 11) * 3;
+	ndw += (npdes >> 11) * 4;
 
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
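The kzalloc-to-vzalloc switch matters because these tables scale with GART size: with a 512 MB GART and 4 KiB pages each table holds 131072 entries, roughly 1 MiB apiece on 64-bit, which is already a high-order physically contiguous allocation for kmalloc. Below is a minimal out-of-tree module sketch of the vzalloc()/vfree() pairing the patch moves to; the module name and table size are arbitrary, not taken from the driver:

#include <linux/module.h>
#include <linux/vmalloc.h>

static void **table;

static int __init vztest_init(void)
{
	/* 1M pointers, about 8 MB on 64-bit: too large to rely on kmalloc's
	 * contiguous pages, fine for vmalloc address space. */
	table = vzalloc(sizeof(void *) * (1 << 20));
	if (!table)
		return -ENOMEM;
	return 0;
}

static void __exit vztest_exit(void)
{
	vfree(table);	/* vfree pairs with vzalloc/vmalloc, kfree does not */
}

module_init(vztest_init);
module_exit(vztest_exit);
MODULE_LICENSE("GPL");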


@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     struct drm_gem_object **obj)
 {
 	struct radeon_bo *robj;
+	unsigned long max_size;
 	int r;
 
 	*obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
+
+	/* maximun bo size is the minimun btw visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
 	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
 	if (r) {
-		if (r != -ERESTARTSYS)
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
+		}
 		return r;
 	}
 	*obj = &robj->gem_base;
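The size limit and the VRAM-to-GTT fallback that used to sit in radeon_bo_create() (see the radeon_object.c hunks further down) now happen at the GEM level. The retry loop terminates because after the first retry initial_domain is VRAM|GTT and no longer compares equal to VRAM alone. A userspace sketch of that control flow; the DOMAIN_* values and try_alloc() are stand-ins, not kernel API:

#include <stdio.h>
#include <stddef.h>

#define DOMAIN_VRAM 0x4
#define DOMAIN_GTT  0x2

static int try_alloc(unsigned domain, size_t size)
{
	/* pretend VRAM is full so the first attempt fails */
	return (domain == DOMAIN_VRAM) ? -1 : 0;
}

static int create_object(unsigned initial_domain, size_t size)
{
retry:
	if (try_alloc(initial_domain, size) != 0) {
		if (initial_domain == DOMAIN_VRAM) {
			initial_domain |= DOMAIN_GTT;	/* widen placement once */
			goto retry;
		}
		fprintf(stderr, "allocation failed\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return create_object(DOMAIN_VRAM, 1 << 20) ? 1 : 0;
}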


@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	struct backlight_properties props;
 	struct radeon_backlight_privdata *pdata;
 	uint8_t backlight_level;
+	char bl_name[16];
 
 	if (!radeon_encoder->enc_priv)
 		return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	memset(&props, 0, sizeof(props));
 	props.max_brightness = RADEON_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
-	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");


@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-	unsigned long max_size = 0;
 	size_t acc_size;
 	int r;
 
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 	}
 	*bo_ptr = NULL;
 
-	/* maximun bo size is the minimun btw visible vram and gtt size */
-	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
-	if ((page_align << PAGE_SHIFT) >= max_size) {
-		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
-		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
-		return -ENOMEM;
-	}
-
 	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
 				       sizeof(struct radeon_bo));
 
-retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
 					acc_size, sg, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS) {
-			if (domain == RADEON_GEM_DOMAIN_VRAM) {
-				domain |= RADEON_GEM_DOMAIN_GTT;
-				goto retry;
-			}
-			dev_err(rdev->dev,
-				"object_init failed for (%lu, 0x%08X)\n",
-				size, domain);
-		}
 		return r;
 	}
 	*bo_ptr = bo;


@@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	int i;
-	uint64_t value;
 
-	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(1)));
-	radeon_ring_write(ring, pe);
-	radeon_ring_write(ring, upper_32_bits(pe));
-	for (i = 0; i < count; ++i) {
-		if (flags & RADEON_VM_PAGE_SYSTEM) {
-			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
-		} else if (flags & RADEON_VM_PAGE_VALID)
-			value = addr;
-		else
-			value = 0;
-		addr += incr;
-		value |= r600_flags;
-		radeon_ring_write(ring, value);
-		radeon_ring_write(ring, upper_32_bits(value));
+	while (count) {
+		unsigned ndw = 2 + count * 2;
+		if (ndw > 0x3FFE)
+			ndw = 0x3FFE;
+
+		radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+		radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+					 WRITE_DATA_DST_SEL(1)));
+		radeon_ring_write(ring, pe);
+		radeon_ring_write(ring, upper_32_bits(pe));
+		for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+			uint64_t value;
+			if (flags & RADEON_VM_PAGE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & RADEON_VM_PAGE_VALID)
+				value = addr;
+			else
+				value = 0;
+			addr += incr;
+			value |= r600_flags;
+			radeon_ring_write(ring, value);
+			radeon_ring_write(ring, upper_32_bits(value));
+		}
 	}
 }
@@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
 }
 
 /*
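Same chunking scheme as the cayman version above, but WRITE_DATA's ndw = 2 + count * 2 is always even, hence the 0x3FFE cap instead of 0x3FFF: with an odd cap the inner loop would emit one more data dword than the packet header declares. A trivial userspace check of the resulting per-packet entry counts, derived only from the two hunks:

#include <stdio.h>

int main(void)
{
	/* max page-table entries per packet under each cap */
	printf("ME_WRITE:   %d\n", (0x3FFF - 1) / 2);	/* 8191 */
	printf("WRITE_DATA: %d\n", (0x3FFE - 2) / 2);	/* 8190 */
	return 0;
}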


@@ -205,6 +205,8 @@
 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
@@ -217,6 +219,7 @@
 {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \