amd, nouveau, one i915 and one EDID fix for v4.12-rc1

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJZFTyMAAoJEAx081l5xIa+u6EP+wWndGMPkEtfyUPk8VX2c0so
 qUyVrjqr2gl/gkHFsw69wDt38EKKEby1aOrSDT/n3paSVXoHBl+QrS8aF5REV3LI
 n6jlKvYbI33FOsISY4CZLN+A3O/KY5ykzGa7X1P0U7b5DRE+r7Xzo2TUpsDbuBSI
 wQxYixK3IK0ygXdZ4T9xDTZ6VIcksiGVagrsdAduu2PFzuU6ye6GTcu+iTOxASAS
 EsiYveZIcSVHuY3nTmVu5Zg6q714kRdMtSvvKFkNsV9QAahal2NC9cnzDQcKGzFA
 3l+osqJQr7KQovyfpU1+cJKrmeWanlve347eC5+p13E/BjwMUAScjZwhLjxrgOPt
 SdqDgUdYa0/w/5mzLqvUDmiNZpM/YgpIRW1pekeM67xQCC3qVM1FiTXavPoANrnB
 BR4EL6lTE7emTDDz6cuXIgP2eSQCufg9pOEYE9U1AbjM9+52g3oAAJ+btFXdr047
 nUnA0csq+uiOXbyc8iBrFB3QxwPGdyL0JQdywQ4PoisB4AfBF+w4RFmuzddV+opb
 gU9kstuphevFmoIjKKy09zv69JKoNQV7a7xebJayovlZwlI+DcQwBIceG/u5h9vK
 st5pn9TC/aivzxVvj/DS+3TPcQ51x7ifWrV9LmAYI7hl9SmyiYtCju6DyV262Vu/
 /Iwj9DEFnjXpuCqLr/Zr
 =ORlL
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.12-rc1' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "AMD, nouveau, one i915, and one EDID fix for v4.12-rc1

  Some fixes that it would be good to have in rc1. It contains the i915
  quiet fix that you reported.

  It also has an amdgpu fixes pull, with lots of ongoing work on Vega10
  which is new in this kernel and is preliminary support so may have a
  fair bit of movement.

  Otherwise a few non-Vega10 AMD fixes, one EDID fix and some nouveau
  regression fixers"

* tag 'drm-fixes-for-v4.12-rc1' of git://people.freedesktop.org/~airlied/linux: (144 commits)
  drm/i915: Make vblank evade warnings optional
  drm/nouveau/therm: remove ineffective workarounds for alarm bugs
  drm/nouveau/tmr: avoid processing completed alarms when adding a new one
  drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
  drm/nouveau/tmr: handle races with hw when updating the next alarm time
  drm/nouveau/tmr: ack interrupt before processing alarms
  drm/nouveau/core: fix static checker warning
  drm/nouveau/fb/ram/gf100-: remove 0x10f200 read
  drm/nouveau/kms/nv50: skip core channel cursor update on position-only changes
  drm/nouveau/kms/nv50: fix source-rect-only plane updates
  drm/nouveau/kms/nv50: remove pointless argument to window atomic_check_acquire()
  drm/amd/powerplay: refine pwm1_enable callback functions for CI.
  drm/amd/powerplay: refine pwm1_enable callback functions for vi.
  drm/amd/powerplay: refine pwm1_enable callback functions for Vega10.
  drm/amdgpu: refine amdgpu pwm1_enable sysfs interface.
  drm/amdgpu: add amd fan ctrl mode enums.
  drm/amd/powerplay: add more smu message on Vega10.
  drm/amdgpu: fix dependency issue
  drm/amd: fix init order of sched job
  drm/amdgpu: add some additional vega10 pci ids
  ...
Linus Torvalds 2017-05-12 11:48:26 -07:00
commit a34ab101a9
108 changed files with 2435 additions and 2124 deletions


@@ -110,6 +110,7 @@ extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
@@ -966,6 +967,8 @@ struct amdgpu_gfx_config {
unsigned mc_arb_ramcfg;
unsigned gb_addr_config;
unsigned num_rbs;
unsigned gs_vgt_table_depth;
unsigned gs_prim_buffer_depth;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
@@ -980,6 +983,7 @@ struct amdgpu_gfx_config {
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
uint32_t wave_front_size;
uint32_t bitmap[4][4];
};
@@ -1000,10 +1004,10 @@ struct amdgpu_ngg_buf {
};
enum {
PRIM = 0,
POS,
CNTL,
PARAM,
NGG_PRIM = 0,
NGG_POS,
NGG_CNTL,
NGG_PARAM,
NGG_BUF_MAX
};
@@ -1125,6 +1129,7 @@ struct amdgpu_job {
void *owner;
uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
bool need_pipeline_sync;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
@@ -1704,9 +1709,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
/*
* BIOS helpers.
*/


@@ -1727,6 +1727,12 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
{
int i;
/*
* VBIOS will check ASIC_INIT_COMPLETE bit to decide if
* execute ASIC_Init posting via driver
*/
adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}


@@ -26,6 +26,7 @@
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
@@ -77,10 +78,29 @@ void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
{
int i;
/*
* VBIOS will check ASIC_INIT_COMPLETE bit to decide if
* execute ASIC_Init posting via driver
*/
adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
}
void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
if (hung)
tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
else
tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;


@@ -28,6 +28,8 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
#endif


@@ -117,8 +117,13 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
}
out_cleanup:
/* Check error value now. The value can be overwritten when clean up.*/
if (r) {
DRM_ERROR("Error while benchmarking BO move.\n");
}
if (sobj) {
r = amdgpu_bo_reserve(sobj, false);
r = amdgpu_bo_reserve(sobj, true);
if (likely(r == 0)) {
amdgpu_bo_unpin(sobj);
amdgpu_bo_unreserve(sobj);
@@ -126,17 +131,13 @@ out_cleanup:
amdgpu_bo_unref(&sobj);
}
if (dobj) {
r = amdgpu_bo_reserve(dobj, false);
r = amdgpu_bo_reserve(dobj, true);
if (likely(r == 0)) {
amdgpu_bo_unpin(dobj);
amdgpu_bo_unreserve(dobj);
}
amdgpu_bo_unref(&dobj);
}
if (r) {
DRM_ERROR("Error while benchmarking BO move.\n");
}
}
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)


@@ -42,82 +42,6 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size)
{
CGS_FUNC_ADEV;
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
*mc_start = 0;
*mc_size = adev->mc.visible_vram_size;
*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
*mc_start = adev->mc.visible_vram_size;
*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
*mem_size = *mc_size;
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
*mc_start = adev->mc.gtt_start;
*mc_size = adev->mc.gtt_size;
*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
CGS_FUNC_ADEV;
int ret;
struct amdgpu_bo *bo;
struct page *kmem_page = vmalloc_to_page(kmem);
int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
if (ret)
return ret;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
/* pin buffer into GTT */
ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(bo);
*kmem_handle = (cgs_handle_t)bo;
return ret;
}
static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
if (obj) {
int r = amdgpu_bo_reserve(obj, false);
if (likely(r == 0)) {
amdgpu_bo_unpin(obj);
amdgpu_bo_unreserve(obj);
}
amdgpu_bo_unref(&obj);
}
return 0;
}
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
@@ -215,7 +139,7 @@ static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
if (obj) {
int r = amdgpu_bo_reserve(obj, false);
int r = amdgpu_bo_reserve(obj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(obj);
amdgpu_bo_unpin(obj);
@@ -239,7 +163,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
r = amdgpu_bo_reserve(obj, false);
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
@@ -252,7 +176,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_unpin(obj);
@@ -265,7 +189,7 @@ static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(obj, map);
@@ -277,7 +201,7 @@ static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
r = amdgpu_bo_reserve(obj, false);
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_kunmap(obj);
@@ -349,62 +273,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
WARN(1, "Invalid indirect register space");
}
static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint8_t val;
int ret = pci_read_config_byte(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_byte error"))
return 0;
return val;
}
static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint16_t val;
int ret = pci_read_config_word(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_word error"))
return 0;
return val;
}
static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
unsigned addr)
{
CGS_FUNC_ADEV;
uint32_t val;
int ret = pci_read_config_dword(adev->pdev, addr, &val);
if (WARN(ret, "pci_read_config_dword error"))
return 0;
return val;
}
static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
uint8_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_byte(adev->pdev, addr, value);
WARN(ret, "pci_write_config_byte error");
}
static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
uint16_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_word(adev->pdev, addr, value);
WARN(ret, "pci_write_config_word error");
}
static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
uint32_t value)
{
CGS_FUNC_ADEV;
int ret = pci_write_config_dword(adev->pdev, addr, value);
WARN(ret, "pci_write_config_dword error");
}
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
@@ -477,56 +345,6 @@ static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigne
adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
int active)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages)
{
DRM_ERROR("not implemented");
return -EPERM;
}
struct cgs_irq_params {
unsigned src_id;
cgs_irq_source_set_func_t set;
@@ -1269,9 +1087,6 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
}
static const struct cgs_ops amdgpu_cgs_ops = {
.gpu_mem_info = amdgpu_cgs_gpu_mem_info,
.gmap_kmem = amdgpu_cgs_gmap_kmem,
.gunmap_kmem = amdgpu_cgs_gunmap_kmem,
.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
@@ -1282,23 +1097,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.write_register = amdgpu_cgs_write_register,
.read_ind_register = amdgpu_cgs_read_ind_register,
.write_ind_register = amdgpu_cgs_write_ind_register,
.read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
.read_pci_config_word = amdgpu_cgs_read_pci_config_word,
.read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
.write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
.write_pci_config_word = amdgpu_cgs_write_pci_config_word,
.write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
.get_pci_resource = amdgpu_cgs_get_pci_resource,
.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
.create_pm_request = amdgpu_cgs_create_pm_request,
.destroy_pm_request = amdgpu_cgs_destroy_pm_request,
.set_pm_request = amdgpu_cgs_set_pm_request,
.pm_request_clock = amdgpu_cgs_pm_request_clock,
.pm_request_engine = amdgpu_cgs_pm_request_engine,
.pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
.set_camera_voltages = amdgpu_cgs_set_camera_voltages,
.get_firmware_info = amdgpu_cgs_get_firmware_info,
.rel_firmware = amdgpu_cgs_rel_firmware,
.set_powergating_state = amdgpu_cgs_set_powergating_state,


@@ -1074,6 +1074,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
amdgpu_cs_parser_fini(p, 0, true);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
@@ -1129,7 +1130,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
if (r)
goto out;
return 0;
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
return r;


@@ -273,6 +273,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
spin_lock(&ctx->ring_lock);
if (seq == ~0ull)
seq = ctx->rings[ring->idx].sequence - 1;
if (seq >= cring->sequence) {
spin_unlock(&ctx->ring_lock);
return ERR_PTR(-EINVAL);


@@ -53,7 +53,6 @@
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_pm.h"
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -350,7 +349,7 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
return;
}
r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(adev->vram_scratch.robj);
amdgpu_bo_unpin(adev->vram_scratch.robj);
@@ -422,12 +421,11 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
if (adev->doorbell.ptr == NULL) {
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
if (adev->doorbell.ptr == NULL)
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
return 0;
}
@@ -1584,9 +1582,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
}
amdgpu_dpm_enable_uvd(adev, false);
amdgpu_dpm_enable_vce(adev, false);
return 0;
}
@@ -1854,7 +1849,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&adev->vm_manager.lock);
atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
@@ -2071,7 +2065,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
drm_crtc_force_disable_all(adev->ddev);
if (adev->mode_info.mode_config_initialized)
drm_crtc_force_disable_all(adev->ddev);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
@@ -2146,7 +2141,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, false);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2159,7 +2154,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
robj = gem_to_amdgpu_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, false);
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
@@ -2216,7 +2211,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
struct drm_connector *connector;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r;
int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -2228,11 +2223,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r) {
if (fbcon)
console_unlock();
return r;
}
if (r)
goto unlock;
}
if (adev->is_atom_fw)
amdgpu_atomfirmware_scratch_regs_restore(adev);
@@ -2249,7 +2241,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_resume(adev);
if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
return r;
goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2260,11 +2252,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_late_init(adev);
if (r) {
if (fbcon)
console_unlock();
return r;
}
if (r)
goto unlock;
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2272,7 +2261,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, false);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -2314,12 +2303,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
dev->dev->power.disable_depth--;
#endif
if (fbcon) {
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
console_unlock();
}
return 0;
unlock:
if (fbcon)
console_unlock();
return r;
}
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
@@ -2430,25 +2421,37 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
uint32_t domain;
int r;
if (!bo->shadow)
return 0;
if (!bo->shadow)
return 0;
r = amdgpu_bo_reserve(bo, false);
if (r)
return r;
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* if bo has been evicted, then no need to recover */
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
r = amdgpu_bo_reserve(bo, true);
if (r)
return r;
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* if bo has been evicted, then no need to recover */
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
r = amdgpu_bo_validate(bo->shadow);
if (r) {
DRM_ERROR("bo validate failed!\n");
goto err;
}
r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
if (r) {
DRM_ERROR("%p bind failed\n", bo->shadow);
goto err;
}
r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
NULL, fence, true);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
}
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
}
err:
amdgpu_bo_unreserve(bo);
return r;
amdgpu_bo_unreserve(bo);
return r;
}
/**
@@ -2520,6 +2523,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
ring = adev->mman.buffer_funcs_ring;
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
@@ -2593,7 +2597,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
if (!ring || !ring->sched.thread)
continue;
kthread_park(ring->sched.thread);
amd_sched_hw_job_reset(&ring->sched);
@@ -2666,6 +2670,7 @@ retry:
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
@@ -2688,7 +2693,8 @@ retry:
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
if (!ring || !ring->sched.thread)
continue;
amd_sched_job_recovery(&ring->sched);
@@ -2697,7 +2703,7 @@ retry:
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i]) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
}
}


@@ -123,7 +123,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
int r;
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, false);
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
@@ -138,52 +138,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
{
int i;
amdgpu_bo_unref(&work->old_abo);
dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
}
static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo)
{
amdgpu_bo_unreserve(new_abo);
amdgpu_flip_work_cleanup(work);
}
static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo)
{
if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
DRM_ERROR("failed to unpin new abo in error path\n");
amdgpu_flip_cleanup_unreserve(work, new_abo);
}
void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo)
{
if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
DRM_ERROR("failed to reserve new abo in error path\n");
amdgpu_flip_work_cleanup(work);
return;
}
amdgpu_flip_cleanup_unpin(work, new_abo);
}
int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
uint32_t target,
struct amdgpu_flip_work **work_p,
struct amdgpu_bo **new_abo_p)
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -196,7 +155,7 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
unsigned long flags;
u64 tiling_flags;
u64 base;
int r;
int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -257,80 +216,41 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
goto pflip_cleanup;
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
*work_p = work;
*new_abo_p = new_abo;
return 0;
pflip_cleanup:
amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
return r;
unpin:
amdgpu_flip_cleanup_unpin(work, new_abo);
return r;
unreserve:
amdgpu_flip_cleanup_unreserve(work, new_abo);
return r;
cleanup:
amdgpu_flip_work_cleanup(work);
return r;
}
void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo)
{
unsigned long flags;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER(
"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
amdgpu_flip_work_func(&work->flip_work.work);
}
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx)
{
struct amdgpu_bo *new_abo;
struct amdgpu_flip_work *work;
int r;
r = amdgpu_crtc_prepare_flip(crtc,
fb,
event,
page_flip_flags,
target,
&work,
&new_abo);
if (r)
return r;
amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
return 0;
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
DRM_ERROR("failed to reserve new abo in error path\n");
goto cleanup;
}
unpin:
if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
DRM_ERROR("failed to unpin new abo in error path\n");
}
unreserve:
amdgpu_bo_unreserve(new_abo);
cleanup:
amdgpu_bo_unref(&work->old_abo);
dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
return r;
}
int amdgpu_crtc_set_config(struct drm_mode_set *set,


@@ -63,9 +63,11 @@
* - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
* - 3.12.0 - Add query for double offchip LDS buffers
* - 3.13.0 - Add PRT support
* - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
* - 3.15.0 - Export more gpu info for gfx9
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 13
#define KMS_DRIVER_MINOR 15
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -453,7 +455,9 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0, 0, 0}


@@ -112,7 +112,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
int ret;
ret = amdgpu_bo_reserve(abo, false);
ret = amdgpu_bo_reserve(abo, true);
if (likely(ret == 0)) {
amdgpu_bo_kunmap(abo);
amdgpu_bo_unpin(abo);


@@ -186,7 +186,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
return;
}
r = amdgpu_bo_reserve(adev->gart.robj, false);
r = amdgpu_bo_reserve(adev->gart.robj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(adev->gart.robj);
amdgpu_bo_unpin(adev->gart.robj);


@@ -139,6 +139,35 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
return -ERESTARTSYS;
if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
return -ERESTARTSYS;
return 0;
}
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);
if (amdgpu_gem_vm_check(NULL, bo))
return false;
}
return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@@ -148,15 +177,13 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
struct list_head list;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
struct dma_fence *fence = NULL;
int r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@@ -164,16 +191,18 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
if (bo_va) {
if (--bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
if (amdgpu_gem_vm_ready(adev, vm, &list)) {
struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (unlikely(r)) {
@@ -502,19 +531,6 @@ out:
return r;
}
static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
return -ERESTARTSYS;
if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
return -ERESTARTSYS;
return 0;
}
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@@ -533,19 +549,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
struct ttm_validate_buffer *entry;
int r = -ERESTARTSYS;
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);
if (amdgpu_gem_va_check(NULL, bo))
goto error;
}
r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
NULL);
if (r)
if (!amdgpu_gem_vm_ready(adev, vm, list))
goto error;
r = amdgpu_vm_update_directories(adev, vm);


@@ -134,6 +134,15 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
return r;
}
void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*


@@ -160,6 +160,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
amdgpu_ring_emit_pipeline_sync(ring);
if (vm) {
r = amdgpu_vm_flush(ring, job);
@@ -217,7 +219,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
job->vm_id);
amdgpu_ring_undo(ring);
return r;
}


@@ -57,6 +57,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
(*job)->need_pipeline_sync = false;
amdgpu_sync_create(&(*job)->sync);
@@ -139,7 +140,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
while (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
int r;
@@ -152,6 +153,9 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
fence = amdgpu_sync_get_fence(&job->sync);
}
if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
job->need_pipeline_sync = true;
return fence;
}


@@ -545,11 +545,22 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
adev->gfx.config.double_offchip_lds_buf;
if (amdgpu_ngg) {
dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[PRIM].gpu_addr;
dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[POS].gpu_addr;
dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[CNTL].gpu_addr;
dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[PARAM].gpu_addr;
dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
}
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -810,7 +821,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
fpriv->vm.csa_bo_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);


@@ -597,21 +597,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo);
int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
uint32_t target,
struct amdgpu_flip_work **work,
struct amdgpu_bo **new_abo);
void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct amdgpu_flip_work *work,
struct amdgpu_bo *new_abo);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif


@@ -295,7 +295,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (*bo == NULL)
return;
if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);
@@ -543,6 +543,27 @@ err:
return r;
}
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
uint32_t domain;
int r;
if (bo->pin_count)
return 0;
domain = bo->prefered_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
goto retry;
}
return r;
}
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,


@@ -175,6 +175,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,


@@ -867,8 +867,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
/* never 0 (full-speed), fuse or smc-controlled always */
return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -887,14 +886,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;
switch (value) {
case 1: /* manual, percent-based */
amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
break;
default: /* disable */
amdgpu_dpm_set_fan_control_mode(adev, 0);
break;
}
amdgpu_dpm_set_fan_control_mode(adev, value);
return count;
}


@@ -113,7 +113,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0;
ret = amdgpu_bo_reserve(bo, false);
ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0))
return;


@@ -55,6 +55,8 @@ static int psp_sw_init(void *handle)
psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
@@ -152,11 +154,6 @@ static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_tmr_init(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
/*
* Allocate 3M memory aligned to 1M from Frame Buffer (local
@@ -168,22 +165,30 @@ static int psp_tmr_init(struct psp_context *psp)
ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
if (ret)
goto failed;
return ret;
}
static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr, 1);
if (ret)
goto failed_mem;
goto failed;
kfree(cmd);
return 0;
failed_mem:
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
failed:
kfree(cmd);
return ret;
@@ -203,104 +208,78 @@ static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}
static int psp_asd_load(struct psp_context *psp)
static int psp_asd_init(struct psp_context *psp)
{
int ret;
struct amdgpu_bo *asd_bo, *asd_shared_bo;
uint64_t asd_mc_addr, asd_shared_mc_addr;
void *asd_buf, *asd_shared_buf;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
/*
* Allocate 16k memory aligned to 4k from Frame Buffer (local
* physical) for shared ASD <-> Driver
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&asd_shared_bo, &asd_shared_mc_addr, &asd_buf);
if (ret)
goto failed;
ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->asd_shared_bo,
&psp->asd_shared_mc_addr,
&psp->asd_shared_buf);
/*
* Allocate 256k memory aligned to 4k from Frame Buffer (local
* physical) for ASD firmware
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_BIN_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&asd_bo, &asd_mc_addr, &asd_buf);
if (ret)
goto failed_mem;
memcpy(asd_buf, psp->asd_start_addr, psp->asd_ucode_size);
psp_prep_asd_cmd_buf(cmd, asd_mc_addr, asd_shared_mc_addr,
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr, 2);
if (ret)
goto failed_mem1;
amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
kfree(cmd);
return 0;
failed_mem1:
amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
failed_mem:
amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
failed:
kfree(cmd);
return ret;
}
static int psp_load_fw(struct amdgpu_device *adev)
static int psp_asd_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
int i;
struct amdgpu_firmware_info *ucode;
struct psp_context *psp = &adev->psp;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr, 2);
kfree(cmd);
return ret;
}
static int psp_hw_start(struct psp_context *psp)
{
int ret;
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
goto failed;
return ret;
ret = psp_bootloader_load_sos(psp);
if (ret)
goto failed;
return ret;
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
goto failed;
return ret;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf);
ret = psp_tmr_load(psp);
if (ret)
goto failed;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_tmr_init(psp);
if (ret)
goto failed_mem;
return ret;
ret = psp_asd_load(psp);
if (ret)
goto failed_mem;
return ret;
return 0;
}
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -310,15 +289,21 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
psp_smu_reload_quirk(psp))
continue;
if (amdgpu_sriov_vf(adev) &&
(ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
/*skip ucode loading in SRIOV VF */
continue;
ret = psp_prep_cmd_buf(ucode, cmd);
ret = psp_prep_cmd_buf(ucode, psp->cmd);
if (ret)
goto failed_mem;
return ret;
ret = psp_cmd_submit_buf(psp, ucode, cmd,
ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
psp->fence_buf_mc_addr, i + 3);
if (ret)
goto failed_mem;
return ret;
#if 0
/* check if firmware loaded sucessfully */
@@ -327,8 +312,59 @@ static int psp_load_fw(struct amdgpu_device *adev)
#endif
}
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
return 0;
}
static int psp_load_fw(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
psp->cmd = cmd;
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf);
if (ret)
goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
goto failed_mem1;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret)
goto failed_mem1;
ret = psp_tmr_init(psp);
if (ret)
goto failed_mem;
ret = psp_asd_init(psp);
if (ret)
goto failed_mem;
ret = psp_hw_start(psp);
if (ret)
goto failed_mem;
ret = psp_np_fw_load(psp);
if (ret)
goto failed_mem;
kfree(cmd);
return 0;
@@ -336,6 +372,9 @@ static int psp_load_fw(struct amdgpu_device *adev)
failed_mem:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
failed_mem1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
kfree(cmd);
return ret;
@@ -379,12 +418,24 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_ucode_fini_bo(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
amdgpu_ucode_fini_bo(adev);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
if (psp->tmr_buf)
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
if (psp->fw_pri_buf)
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
if (psp->fence_buf_bo)
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
return 0;
}
@@ -397,18 +448,30 @@ static int psp_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
DRM_INFO("PSP is resuming...\n");
mutex_lock(&adev->firmware.mutex);
ret = psp_load_fw(adev);
ret = psp_hw_start(psp);
if (ret)
DRM_ERROR("PSP resume failed\n");
goto failed;
ret = psp_np_fw_load(psp);
if (ret)
goto failed;
mutex_unlock(&adev->firmware.mutex);
return 0;
failed:
DRM_ERROR("PSP resume failed\n");
mutex_unlock(&adev->firmware.mutex);
return ret;
}


@@ -30,8 +30,8 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_ASD_BIN_SIZE 0x40000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
enum psp_ring_type
{
@@ -57,6 +57,7 @@ struct psp_context
{
struct amdgpu_device *adev;
struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@@ -64,6 +65,9 @@ struct psp_context
int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
bool (*compare_sram_data)(struct psp_context *psp,
@@ -71,6 +75,11 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
/* sos firmware */
const struct firmware *sos_fw;
uint32_t sos_fw_version;
@@ -85,12 +94,15 @@ struct psp_context
uint64_t tmr_mc_addr;
void *tmr_buf;
/* asd firmware */
/* asd firmware and buffer */
const struct firmware *asd_fw;
uint32_t asd_fw_version;
uint32_t asd_feature_version;
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
struct amdgpu_bo *asd_shared_bo;
uint64_t asd_shared_mc_addr;
void *asd_shared_buf;
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
@@ -105,6 +117,8 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \


@@ -99,6 +99,7 @@ struct amdgpu_ring_funcs {
uint32_t align_mask;
u32 nop;
bool support_64bit_ptrs;
unsigned vmhub;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -178,6 +179,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif


@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
return -EINVAL;
}
r = amdgpu_bo_reserve(sa_manager->bo, false);
r = amdgpu_bo_reserve(sa_manager->bo, true);
if (!r) {
amdgpu_bo_kunmap(sa_manager->bo);
amdgpu_bo_unpin(sa_manager->bo);


@@ -190,26 +190,29 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job),
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
__field(u32, vmid)
__field(u32, vm_id)
__field(u32, vm_hub)
__field(u64, pd_addr)
__field(u32, needs_flush)
),
TP_fast_assign(
__entry->vm = vm;
__entry->ring = ring;
__entry->vmid = job->vm_id;
__entry->ring = ring->idx;
__entry->vm_id = job->vm_id;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
__entry->vm, __entry->ring, __entry->vmid,
__entry->pd_addr, __entry->needs_flush)
TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
__entry->vm, __entry->ring, __entry->vm_id,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -331,21 +334,25 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
);
TRACE_EVENT(amdgpu_vm_flush,
TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
TP_ARGS(pd_addr, ring, id),
TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr),
TP_ARGS(ring, vm_id, pd_addr),
TP_STRUCT__entry(
__field(u64, pd_addr)
__field(u32, ring)
__field(u32, id)
__field(u32, vm_id)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
__entry->ring = ring->idx;
__entry->vm_id = vm_id;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
__entry->ring = ring;
__entry->id = id;
),
TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
__entry->ring, __entry->id, __entry->pd_addr)
TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
__entry->ring, __entry->vm_id,
__entry->vm_hub,__entry->pd_addr)
);
TRACE_EVENT(amdgpu_bo_list_set,


@@ -203,7 +203,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs_ring->ready == false) {
if (adev->mman.buffer_funcs &&
adev->mman.buffer_funcs_ring &&
adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -763,7 +765,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
struct amdgpu_ttm_tt *gtt, *tmp;
struct ttm_mem_reg bo_mem;
uint32_t flags;
uint64_t flags;
int r;
bo_mem.mem_type = TTM_PL_TT;
@@ -1038,11 +1040,17 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
return ttm_bo_eviction_valuable(bo, place);
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
case TTM_PL_VRAM:
/* Check each drm MM node individually */
while (num_pages) {
if (place->fpfn < (node->start + node->size) &&
@@ -1052,8 +1060,10 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
break;
return false;
default:
break;
}
return ttm_bo_eviction_valuable(bo, place);
@@ -1188,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
if (adev->stollen_vga_memory) {
r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
if (r == 0) {
amdgpu_bo_unpin(adev->stollen_vga_memory);
amdgpu_bo_unreserve(adev->stollen_vga_memory);
@@ -1401,6 +1411,8 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
*man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -1414,11 +1426,17 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
spin_lock(&glob->lru_lock);
drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
if (ttm_pl == TTM_PL_VRAM)
switch (ttm_pl) {
case TTM_PL_VRAM:
seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
adev->mman.bdev.man[ttm_pl].size,
(u64)atomic64_read(&adev->vram_usage) >> 20,
(u64)atomic64_read(&adev->vram_vis_usage) >> 20);
break;
case TTM_PL_TT:
amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
break;
}
return 0;
}


@@ -382,10 +382,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
* ucode info here
*/
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
else
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (amdgpu_sriov_vf(adev))
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
else
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
} else {
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];


@@ -955,11 +955,11 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring);
unsigned i;
int r;
int r, timeout = adev->usec_timeout;
/* TODO: remove it if VCE can work for sriov */
/* workaround VCE ring test slow issue for sriov*/
if (amdgpu_sriov_vf(adev))
return 0;
timeout *= 10;
r = amdgpu_ring_alloc(ring, 16);
if (r) {
@@ -970,13 +970,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
for (i = 0; i < timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
DRM_UDELAY(1);
}
if (i < adev->usec_timeout) {
if (i < timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
@@ -999,10 +999,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence = NULL;
long r;
/* TODO: remove it if VCE can work for sriov */
if (amdgpu_sriov_vf(ring->adev))
return 0;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;


@@ -225,3 +225,49 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
* @amdgpu: amdgpu device.
* MM table is used by UVD and VCE for its initialization
* Return: Zero if allocate success.
*/
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
int r;
if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
}
/**
* amdgpu_virt_free_mm_table() - free mm table memory
* @adev: amdgpu device.
* Free the MM table memory.
*/
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
return;
amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
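
For orientation, the two helpers above form an idempotent pair: allocation is skipped on bare metal or when the table already exists, and freeing resets gpu_addr so a later allocation can succeed. Below is a minimal userspace sketch of that guard pattern; the is_sriov_vf flag and the calloc() stand-in for amdgpu_bo_create_kernel are assumptions for the demo, not amdgpu code.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

struct mm_table {
	uint64_t gpu_addr;
	void *cpu_addr;
};

static bool is_sriov_vf = true;	/* pretend we run as an SR-IOV VF */

static int alloc_mm_table(struct mm_table *t)
{
	if (!is_sriov_vf || t->gpu_addr)	/* bare metal, or already allocated */
		return 0;
	t->cpu_addr = calloc(1, 4096);		/* stands in for the VRAM BO */
	if (!t->cpu_addr)
		return -1;
	t->gpu_addr = (uint64_t)(uintptr_t)t->cpu_addr;
	return 0;
}

static void free_mm_table(struct mm_table *t)
{
	if (!is_sriov_vf || !t->gpu_addr)	/* nothing to free */
		return;
	free(t->cpu_addr);
	t->cpu_addr = NULL;
	t->gpu_addr = 0;	/* reset so a later alloc can succeed again */
}

int main(void)
{
	struct mm_table t = { 0 };

	alloc_mm_table(&t);
	alloc_mm_table(&t);	/* second call is a no-op */
	free_mm_table(&t);
	printf("gpu_addr after free: %llu\n", (unsigned long long)t.gpu_addr);
	return 0;
}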

View File

@ -98,5 +98,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
#endif

View File

@ -406,6 +406,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
@ -413,16 +415,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
int r = 0;
fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
GFP_KERNEL);
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences)
return -ENOMEM;
mutex_lock(&adev->vm_manager.lock);
mutex_lock(&id_mgr->lock);
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
list_for_each_entry(idle, &id_mgr->ids_lru, list) {
fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
if (!fences[i])
break;
@ -430,7 +431,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
/* If we can't find a idle VMID to use, wait till one becomes available */
if (&idle->list == &adev->vm_manager.ids_lru) {
if (&idle->list == &id_mgr->ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct dma_fence_array *array;
@ -455,25 +456,19 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
mutex_unlock(&adev->vm_manager.lock);
mutex_unlock(&id_mgr->lock);
return 0;
}
kfree(fences);
job->vm_needs_flush = true;
job->vm_needs_flush = false;
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
i = 0;
bool needs_flush = false;
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
@ -483,16 +478,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
if (!id->last_flush)
continue;
if (id->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush))
continue;
if (!id->last_flush ||
(id->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush)))
needs_flush = true;
flushed = id->flushed_updates;
if (updates &&
(!flushed || dma_fence_is_later(updates, flushed)))
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
/* Concurrent flushes are only possible starting with Vega10 */
if (adev->asic_type < CHIP_VEGA10 && needs_flush)
continue;
/* Good we can use this VMID. Remember this submission as
@ -502,17 +498,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
}
job->vm_id = id - adev->vm_manager.ids;
job->vm_needs_flush = false;
trace_amdgpu_vm_grab_id(vm, ring->idx, job);
if (needs_flush)
goto needs_flush;
else
goto no_flush_needed;
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
};
/* Still no ID to use? Then use the idle one found earlier */
id = idle;
@ -522,23 +518,25 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
id->pd_gpu_addr = job->vm_pd_addr;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
atomic64_set(&id->owner, vm->client_id);
needs_flush:
job->vm_needs_flush = true;
dma_fence_put(id->last_flush);
id->last_flush = NULL;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
no_flush_needed:
list_move_tail(&id->list, &id_mgr->ids_lru);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
job->vm_id = id - adev->vm_manager.ids;
trace_amdgpu_vm_grab_id(vm, ring->idx, job);
job->vm_id = id - id_mgr->ids;
trace_amdgpu_vm_grab_id(vm, ring, job);
error:
mutex_unlock(&adev->vm_manager.lock);
mutex_unlock(&id_mgr->lock);
return r;
}
@ -590,7 +588,9 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@ -614,24 +614,24 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (ring->funcs->emit_pipeline_sync)
if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
struct dma_fence *fence;
trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
r = amdgpu_fence_emit(ring, &fence);
if (r)
return r;
mutex_lock(&adev->vm_manager.lock);
mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
mutex_unlock(&id_mgr->lock);
}
if (gds_switch_needed) {
@ -666,9 +666,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
*
* Reset saved GDW, GWS and OA to force switch on next flush.
*/
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid)
{
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
id->gds_base = 0;
id->gds_size = 0;
@ -1336,6 +1338,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
flags &= ~AMDGPU_PTE_MTYPE_MASK;
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
if ((mapping->flags & AMDGPU_PTE_PRT) &&
(adev->asic_type >= CHIP_VEGA10)) {
flags |= AMDGPU_PTE_PRT;
flags &= ~AMDGPU_PTE_VALID;
}
trace_amdgpu_vm_bo_update(mapping);
pfn = mapping->offset >> PAGE_SHIFT;
@ -1629,8 +1637,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
0, 0, &f);
r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
mapping->start, mapping->last,
0, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@ -2117,10 +2126,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
int i, r;
int r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
vm->ids[i] = NULL;
vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock);
@ -2241,16 +2248,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
unsigned i;
unsigned i, j;
INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vm_id_manager *id_mgr =
&adev->vm_manager.id_mgr[i];
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.num_ids; ++i) {
amdgpu_vm_reset_id(adev, i);
amdgpu_sync_create(&adev->vm_manager.ids[i].active);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
amdgpu_vm_reset_id(adev, i, j);
amdgpu_sync_create(&id_mgr->ids[j].active);
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
adev->vm_manager.fence_context =
@ -2258,6 +2270,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
@ -2273,13 +2286,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;
unsigned i, j;
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vm_id_manager *id_mgr =
&adev->vm_manager.id_mgr[i];
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
mutex_destroy(&id_mgr->lock);
for (j = 0; j < AMDGPU_NUM_VM; ++j) {
struct amdgpu_vm_id *id = &id_mgr->ids[j];
amdgpu_sync_free(&id->active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
}
}
}
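
The init and teardown above mirror the new per-hub layout: every VMHUB now owns its own lock, LRU list and ID array instead of sharing one global set, so the GFX and MM hubs no longer contend on a single VMID list. A small userspace model of that layout follows; MAX_VMHUBS and the ID counts are demo assumptions, the real driver sizes these from the ASIC.

#include <stdio.h>

#define MAX_VMHUBS 2	/* one GFX hub, one MM hub, as on Vega10 */
#define NUM_VM	   16	/* stand-in for AMDGPU_NUM_VM */

struct vm_id {
	int in_lru;
};

struct vm_id_manager {
	unsigned num_ids;
	struct vm_id ids[NUM_VM];
};

int main(void)
{
	struct vm_id_manager id_mgr[MAX_VMHUBS] = {
		{ .num_ids = 8 },
		{ .num_ids = 8 },
	};
	unsigned i, j;

	for (i = 0; i < MAX_VMHUBS; ++i) {
		/* VMID 0 stays reserved for the system VM on every hub */
		for (j = 1; j < id_mgr[i].num_ids; ++j) {
			id_mgr[i].ids[j].in_lru = 1;
			printf("hub %u: vmid %u on LRU\n", i, j);
		}
	}
	return 0;
}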

View File

@ -65,7 +65,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7)
#define AMDGPU_PTE_PRT (1ULL << 63)
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
@ -114,9 +115,6 @@ struct amdgpu_vm {
struct dma_fence *last_dir_update;
uint64_t last_eviction_counter;
/* for id and flush management per ring */
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
@ -149,12 +147,16 @@ struct amdgpu_vm_id {
uint32_t oa_size;
};
struct amdgpu_vm_id_manager {
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
};
struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
/* Handling of VM fences */
u64 fence_context;
@ -200,7 +202,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,

View File

@ -1267,30 +1267,33 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
if (mode) {
/* stop auto-manage */
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
ci_fan_ctrl_stop_smc_fan_control(adev);
ci_fan_ctrl_set_static_mode(adev, mode);
} else {
/* restart auto-manage */
ci_dpm_set_fan_speed_percent(adev, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (adev->pm.dpm.fan.ucode_fan_control)
ci_fan_ctrl_stop_smc_fan_control(adev);
break;
case AMD_FAN_CTRL_AUTO:
if (adev->pm.dpm.fan.ucode_fan_control)
ci_thermal_start_smc_fan_control(adev);
else
ci_fan_ctrl_set_default_mode(adev);
break;
default:
break;
}
}
static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
if (pi->fan_is_controlled_by_smc)
return 0;
tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return AMD_FAN_CTRL_AUTO;
else
return AMD_FAN_CTRL_MANUAL;
}
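
The reworked callbacks give pwm1_enable clean tri-state semantics: NONE stops SMC fan control and pins the fan at full speed, MANUAL stops SMC control and leaves the speed to the caller, and AUTO hands control back to the SMC. A compact model of that contract, where the enum values and the smc_controls_fan flag are demo assumptions rather than driver definitions:

#include <stdio.h>

enum fan_ctrl {
	FAN_CTRL_NONE,		/* fan forced to full speed */
	FAN_CTRL_MANUAL,	/* caller programs the speed */
	FAN_CTRL_AUTO,		/* SMC firmware manages the fan */
};

static int smc_controls_fan = 1;

static void set_fan_control_mode(enum fan_ctrl mode)
{
	switch (mode) {
	case FAN_CTRL_NONE:	/* stop SMC control, pin fan at 100% */
		smc_controls_fan = 0;
		printf("fan speed set to 100%%\n");
		break;
	case FAN_CTRL_MANUAL:	/* stop SMC control, leave speed alone */
		smc_controls_fan = 0;
		break;
	case FAN_CTRL_AUTO:	/* hand control back to the SMC */
		smc_controls_fan = 1;
		break;
	}
}

static enum fan_ctrl get_fan_control_mode(void)
{
	return smc_controls_fan ? FAN_CTRL_AUTO : FAN_CTRL_MANUAL;
}

int main(void)
{
	set_fan_control_mode(FAN_CTRL_MANUAL);
	printf("reported mode: %d\n", get_fan_control_mode());
	return 0;
}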
#if 0
@ -3036,6 +3039,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
memory_clock,
&memory_level->MinVddcPhases);
memory_level->EnabledForActivity = 1;
memory_level->EnabledForThrottle = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
@ -3468,8 +3472,6 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret;
}
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =

View File

@ -2230,7 +2230,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@ -2589,7 +2589,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(aobj, false);
ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@ -2720,7 +2720,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {

View File

@ -2214,7 +2214,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@ -2609,7 +2609,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(aobj, false);
ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@ -2740,7 +2740,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {

View File

@ -979,7 +979,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
u32 tmp, arb_control3;
u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
@ -1091,6 +1091,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@ -1120,6 +1122,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/* watermark setup */
@ -1640,7 +1645,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@ -1957,7 +1962,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(aobj, false);
ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@ -2083,7 +2088,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {

View File

@ -2089,7 +2089,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@ -2440,7 +2440,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(aobj, false);
ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@ -2571,7 +2571,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {

View File

@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
r = amdgpu_bo_reserve(abo, false);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {

View File

@ -1579,7 +1579,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
static void gfx_v6_0_config_init(struct amdgpu_device *adev)
{
adev->gfx.config.double_offchip_lds_buf = 1;
adev->gfx.config.double_offchip_lds_buf = 0;
}
static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.rlc.save_restore_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@ -3292,7 +3292,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->me = 1;
ring->pipe = i;
ring->queue = i;
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type);

View File

@ -1935,7 +1935,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.num_ids; i++) {
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
if (i == 0)
sh_mem_base = 0;
else
@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj) {
r = amdgpu_bo_reserve(ring->mqd_obj, false);
r = amdgpu_bo_reserve(ring->mqd_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.mec.hpd_eop_obj) {
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* save restore block */
if (adev->gfx.rlc.save_restore_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);

View File

@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* jump table block */
if (adev->gfx.rlc.cp_table_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.mec.hpd_eop_obj) {
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
memset(hpd, 0, MEC_HPD_SIZE);
r = amdgpu_bo_reserve(kiq->eop_obj, false);
r = amdgpu_bo_reserve(kiq->eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
amdgpu_bo_kunmap(kiq->eop_obj);
@ -1932,6 +1932,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
case 0xca:
case 0xce:
case 0x88:
case 0xe6:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
break;
@ -1964,17 +1965,28 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_backends_per_se = 1;
switch (adev->pdev->revision) {
case 0x80:
case 0x81:
case 0xc0:
case 0xc1:
case 0xc2:
case 0xc4:
case 0xc8:
case 0xc9:
case 0xd6:
case 0xda:
case 0xe9:
case 0xea:
adev->gfx.config.max_cu_per_sh = 3;
break;
case 0x83:
case 0xd0:
case 0xd1:
case 0xd2:
case 0xd4:
case 0xdb:
case 0xe1:
case 0xe2:
default:
adev->gfx.config.max_cu_per_sh = 2;
break;
@ -3890,7 +3902,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.num_ids; i++) {
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
if (i == 0) {

File diff suppressed because it is too large

View File

@ -346,7 +346,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or VRAM, whichever is larger.
*/
if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@ -621,7 +622,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);

View File

@ -395,7 +395,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or VRAM, whichever is larger.
*/
if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@ -746,7 +747,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);

View File

@ -557,7 +557,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or VRAM, whichever is larger.
*/
if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@ -949,7 +950,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);

View File

@ -386,6 +386,23 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
unsigned vmhub = ring->funcs->vmhub;
ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
ring->idx, ring->name, ring->vm_inv_eng,
ring->funcs->vmhub);
}
/* Engine 17 is used for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
BUG_ON(vm_inv_eng[i] > 17);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
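
The late_init loop above hands out invalidation engines round-robin per hub, starting at engine 3 and keeping engine 17 for GART flushes. A standalone model of the same counting scheme; the ring names and hub indices here are made up for the demo.

#include <stdio.h>
#include <assert.h>

#define MAX_VMHUBS 2

int main(void)
{
	unsigned vm_inv_eng[MAX_VMHUBS] = { 3, 3 };	/* first free engine per hub */
	struct {
		const char *name;
		unsigned vmhub;
	} rings[] = {
		{ "gfx", 0 }, { "sdma0", 1 }, { "sdma1", 1 }, { "uvd", 1 },
	};
	unsigned i;

	for (i = 0; i < sizeof(rings) / sizeof(rings[0]); ++i) {
		unsigned eng = vm_inv_eng[rings[i].vmhub]++;

		printf("ring %s uses VM inv eng %u on hub %u\n",
		       rings[i].name, eng, rings[i].vmhub);
	}

	/* engine 17 is reserved for GART flushes, so it is never handed out */
	for (i = 0; i < MAX_VMHUBS; ++i)
		assert(vm_inv_eng[i] <= 17);
	return 0;
}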
@ -469,7 +486,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or VRAM, whichever is larger.
*/
if (amdgpu_gart_size == -1)
adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@ -519,7 +537,8 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
/* TODO: fix num_level for APU when updating vm size and block size */
if (adev->flags & AMD_IS_APU)

View File

@ -511,6 +511,9 @@ static int mmhub_v1_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->asic_type) {
case CHIP_VEGA10:
mmhub_v1_0_update_medium_grain_clock_gating(adev,

View File

@ -84,4 +84,61 @@ struct mmsch_v1_0_cmd_indirect_write {
uint32_t reg_value;
};
static inline void mmsch_v1_0_insert_direct_wt(struct mmsch_v1_0_cmd_direct_write *direct_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t value)
{
direct_wt->cmd_header.reg_offset = reg_offset;
direct_wt->reg_value = value;
memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v1_0_cmd_direct_write));
}
static inline void mmsch_v1_0_insert_direct_rd_mod_wt(struct mmsch_v1_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t data)
{
direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
direct_rd_mod_wt->mask_value = mask;
direct_rd_mod_wt->write_data = data;
memcpy((void *)init_table, direct_rd_mod_wt,
sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write));
}
static inline void mmsch_v1_0_insert_direct_poll(struct mmsch_v1_0_cmd_direct_polling *direct_poll,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t wait)
{
direct_poll->cmd_header.reg_offset = reg_offset;
direct_poll->mask_value = mask;
direct_poll->wait_value = wait;
memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v1_0_cmd_direct_polling));
}
#define MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
mmsch_v1_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
init_table, (reg), \
(mask), (data)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
}
#define MMSCH_V1_0_INSERT_DIRECT_WT(reg, value) { \
mmsch_v1_0_insert_direct_wt(&direct_wt, \
init_table, (reg), \
(value)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
}
#define MMSCH_V1_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
mmsch_v1_0_insert_direct_poll(&direct_poll, \
init_table, (reg), \
(mask), (wait)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
}
#endif
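
Each INSERT macro above appends one fixed-size command to the init table and advances both the table cursor and the running size in dwords. A simplified, self-contained model of the direct-write case follows; the command layout and register offsets are placeholders, not the real MMSCH v1.0 format.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct cmd_direct_write {
	uint32_t cmd_type;	/* stands in for the command header */
	uint32_t reg_offset;
	uint32_t reg_value;
};

static uint32_t *insert_direct_wt(uint32_t *table, uint32_t reg, uint32_t val)
{
	struct cmd_direct_write cmd = {
		.cmd_type = 1, .reg_offset = reg, .reg_value = val,
	};

	memcpy(table, &cmd, sizeof(cmd));
	return table + sizeof(cmd) / 4;	/* advance the cursor in dwords */
}

int main(void)
{
	uint32_t table[64];
	uint32_t *cur = table;

	cur = insert_direct_wt(cur, 0x100, 0xdeadbeef);	/* placeholder regs */
	cur = insert_direct_wt(cur, 0x104, 0x1);

	printf("table size: %u dwords\n", (unsigned)(cur - table));
	return 0;
}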

View File

@ -368,9 +368,12 @@ static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
u32 reg;
u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
if (!(reg & mask))
return -ENOENT;
/* workaround: host driver doesn't set VALID for CMPL now */
if (event != IDH_FLR_NOTIFICATION_CMPL) {
reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
if (!(reg & mask))
return -ENOENT;
}
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)

View File

@ -166,11 +166,8 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_bo *psp_sysdrv;
void *psp_sysdrv_virt = NULL;
uint64_t psp_sysdrv_mem;
struct amdgpu_device *adev = psp->adev;
uint32_t size, sol_reg;
uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
@ -185,27 +182,14 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
if (ret)
return ret;
/*
* Create a 1 meg GART memory to store the psp sys driver
* binary with a 1 meg aligned address
*/
size = (psp->sys_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) &
(~(PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
AMDGPU_GEM_DOMAIN_GTT,
&psp_sysdrv,
&psp_sysdrv_mem,
&psp_sysdrv_virt);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy PSP System Driver binary to memory */
memcpy(psp_sysdrv_virt, psp->sys_start_addr, psp->sys_bin_size);
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
(uint32_t)(psp_sysdrv_mem >> 20));
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg);
@ -216,8 +200,6 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
amdgpu_bo_free_kernel(&psp_sysdrv, &psp_sysdrv_mem, &psp_sysdrv_virt);
return ret;
}
@ -225,11 +207,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_bo *psp_sos;
void *psp_sos_virt = NULL;
uint64_t psp_sos_mem;
struct amdgpu_device *adev = psp->adev;
uint32_t size, sol_reg;
uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
@ -244,23 +223,14 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
if (ret)
return ret;
size = (psp->sos_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) &
(~((uint64_t)PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
AMDGPU_GEM_DOMAIN_GTT,
&psp_sos,
&psp_sos_mem,
&psp_sos_virt);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy Secure OS binary to PSP memory */
memcpy(psp_sos_virt, psp->sos_start_addr, psp->sos_bin_size);
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
(uint32_t)(psp_sos_mem >> 20));
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg);
@ -273,8 +243,6 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
0, true);
#endif
amdgpu_bo_free_kernel(&psp_sos, &psp_sos_mem, &psp_sos_virt);
return ret;
}
@ -300,7 +268,6 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
@ -320,6 +287,16 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
return 0;
}
int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_69), psp_ring_reg);
@ -344,6 +321,33 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), psp_ring_reg);
/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
if (ring->ring_mem)
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
return ret;
}
int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,

View File

@ -39,6 +39,10 @@ extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,

View File

@ -48,8 +48,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 golden_settings_sdma_4[] =
{
static const u32 golden_settings_sdma_4[] = {
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
@ -76,8 +75,7 @@ static const u32 golden_settings_sdma_4[] =
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
};
static const u32 golden_settings_sdma_vg10[] =
{
static const u32 golden_settings_sdma_vg10[] = {
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
@ -87,16 +85,17 @@ static const u32 golden_settings_sdma_vg10[] =
static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
{
u32 base = 0;
switch (instance) {
case 0:
base = SDMA0_BASE.instance[0].segment[0];
break;
case 1:
base = SDMA1_BASE.instance[0].segment[0];
break;
default:
BUG();
break;
case 0:
base = SDMA0_BASE.instance[0].segment[0];
break;
case 1:
base = SDMA1_BASE.instance[0].segment[0];
break;
default:
BUG();
break;
}
return base + internal_offset;
@ -159,7 +158,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA10:
chip_name = "vega10";
break;
default: BUG();
default:
BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@ -179,7 +179,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP? "true": "false");
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@ -192,9 +192,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
}
out:
if (err) {
printk(KERN_ERR
"sdma_v4_0: Failed to load firmware \"%s\"\n",
fw_name);
DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL;
@ -212,10 +210,10 @@ out:
*/
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
u64* rptr;
u64 *rptr;
/* XXX check if swapping is necessary on BE */
rptr =((u64*)&ring->adev->wb.wb[ring->rptr_offs]);
rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2);
@ -231,19 +229,20 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64* wptr = NULL;
uint64_t local_wptr=0;
u64 *wptr = NULL;
uint64_t local_wptr = 0;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = ((u64*)&adev->wb.wb[ring->wptr_offs]);
wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
*wptr = (*wptr) >> 2;
DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
} else {
u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
wptr=&local_wptr;
wptr = &local_wptr;
lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
@ -285,12 +284,13 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x \n",
me,
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
me,
lower_32_bits(ring->wptr << 2),
me,
upper_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
@ -319,22 +319,22 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
u32 vmid = vm_id & 0xf;
/* IB packet must end on a 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
/* IB packet must end on a 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
}
@ -523,7 +523,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell;
u32 doorbell_offset;
u32 temp;
int i,r;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
@ -572,7 +572,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell){
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
@ -694,9 +694,7 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
for (j = 0; j < fw_size; j++)
{
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
}
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
@ -744,10 +742,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
if (r)
return r;
r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
return 0;
return r;
}
/**
@ -797,9 +793,8 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
if (tmp == 0xDEADBEEF)
break;
}
DRM_UDELAY(1);
}
@ -864,29 +859,29 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
amdgpu_wb_free(adev, index);
return r;
}
@ -1039,44 +1034,40 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
/* flush TLB */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
amdgpu_ring_write(ring, req);
/* flush TLB */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
amdgpu_ring_write(ring, req);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1 << vm_id); /* reference */
amdgpu_ring_write(ring, 1 << vm_id); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1 << vm_id); /* reference */
amdgpu_ring_write(ring, 1 << vm_id); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
static int sdma_v4_0_early_init(void *handle)
@ -1162,8 +1153,6 @@ static int sdma_v4_0_hw_init(void *handle)
sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev);
if (r)
return r;
return r;
}
@ -1199,10 +1188,12 @@ static bool sdma_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
return false;
}
return true;
@ -1211,8 +1202,9 @@ static bool sdma_v4_0_is_idle(void *handle)
static int sdma_v4_0_wait_for_idle(void *handle)
{
unsigned i;
u32 sdma0,sdma1;
u32 sdma0, sdma1;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
@ -1240,7 +1232,7 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
@ -1332,7 +1324,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if(def != data)
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
}
} else {
@ -1382,17 +1374,17 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
/* 1-not override: enable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
}
} else {
/* 0-override:disable sdma0 mem light sleep */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 0-override:disable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) {
@ -1473,6 +1465,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr,
@ -1480,7 +1473,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
6 + /* sdma_v4_0_ring_emit_hdp_flush */
3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */
36 + /* sdma_v4_0_ring_emit_vm_flush */
18 + /* sdma_v4_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib,
@ -1606,8 +1599,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
}
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block =
{
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
.minor = 0,

View File

@ -25,7 +25,7 @@
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@ -405,11 +405,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
soc15_gpu_pci_config_reset(adev);
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
return 0;
}
@ -505,8 +505,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:

View File

@ -45,13 +45,31 @@ struct nbio_pcie_index_data {
u32 index_offset;
u32 data_offset;
};
// Register Access Macro
/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg)))))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define RREG32_SOC15(ip, inst, reg) \
RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))))
#define WREG32_SOC15(ip, inst, reg, value) \
WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
#endif
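
WREG32_FIELD15 above is a classic read-modify-write: read the register, clear the field mask, OR in the shifted value, write it back. A userspace model with a fake register file; the mask and shift are demo values, not a real SOC15 register field.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[16];	/* fake MMIO space */

static uint32_t rreg32(unsigned off)
{
	return regs[off];
}

static void wreg32(unsigned off, uint32_t val)
{
	regs[off] = val;
}

#define FIELD_MASK	0x00000f00u	/* demo field: bits 11:8 */
#define FIELD_SHIFT	8

static void wreg32_field(unsigned off, uint32_t val)
{
	uint32_t tmp = rreg32(off) & ~FIELD_MASK;	/* clear the field */

	wreg32(off, tmp | (val << FIELD_SHIFT));	/* write the new value */
}

int main(void)
{
	wreg32(2, 0xffffffff);
	wreg32_field(2, 0x5);	/* only bits 11:8 change */
	printf("reg[2] = 0x%08x\n", rreg32(2));	/* prints 0xfffff5ff */
	return 0;
}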

View File

@ -27,10 +27,14 @@
#include "amdgpu_uvd.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
#include "vega10/soc15ip.h"
#include "vega10/UVD/uvd_7_0_offset.h"
#include "vega10/UVD/uvd_7_0_sh_mask.h"
#include "vega10/VCE/vce_4_0_offset.h"
#include "vega10/VCE/vce_4_0_default.h"
#include "vega10/VCE/vce_4_0_sh_mask.h"
#include "vega10/NBIF/nbif_6_1_offset.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
@ -41,6 +45,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
/**
* uvd_v7_0_ring_get_rptr - get read pointer
@ -98,6 +103,9 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
if (ring == &adev->uvd.ring_enc[0])
return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
else
@ -129,6 +137,13 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
return;
}
if (ring == &adev->uvd.ring_enc[0])
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
lower_32_bits(ring->wptr));
@ -353,7 +368,10 @@ static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_enc_rings = 2;
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
else
adev->uvd.num_enc_rings = 2;
uvd_v7_0_set_ring_funcs(adev);
uvd_v7_0_set_enc_ring_funcs(adev);
uvd_v7_0_set_irq_funcs(adev);
@ -406,21 +424,31 @@ static int uvd_v7_0_sw_init(void *handle)
r = amdgpu_uvd_resume(adev);
if (r)
return r;
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
return r;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
return r;
}
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
return r;
}
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
return r;
}
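/*
 * Note on the doorbell index above (an observation from the surrounding
 * code, not a documented contract): the AMDGPU_DOORBELL64_* values count
 * 64-bit doorbell slots, while ring->doorbell_index is in 32-bit units,
 * hence the "* 2" when wiring up the SR-IOV encode ring.
 */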
@ -429,6 +457,8 @@ static int uvd_v7_0_sw_fini(void *handle)
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_virt_free_mm_table(adev);
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
@ -455,49 +485,54 @@ static int uvd_v7_0_hw_init(void *handle)
uint32_t tmp;
int i, r;
r = uvd_v7_0_start(adev);
if (amdgpu_sriov_vf(adev))
r = uvd_v7_0_sriov_start(adev);
else
r = uvd_v7_0_start(adev);
if (r)
goto done;
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
goto done;
if (!amdgpu_sriov_vf(adev)) {
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
goto done;
}
r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
}
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
/* Clear timeout status bits */
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_TIMEOUT_STATUS), 0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_CNTL), 0));
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
}
r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
}
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
/* Clear timeout status bits */
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_TIMEOUT_STATUS), 0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
mmUVD_SEMA_CNTL), 0));
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
ring->ready = true;
@ -618,6 +653,241 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
}
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
struct amdgpu_mm_table *table)
{
uint32_t data = 0, loop;
uint64_t addr = table->gpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
uint32_t size;
size = header->header_size + header->vce_table_size + header->uvd_table_size;
/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
/* 3, notify mmsch about the size of this descriptor */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
loop = 1000;
while ((data & 0x10000002) != 0x10000002) {
udelay(10);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
loop--;
if (!loop)
break;
}
if (!loop) {
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
return 0;
}
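/*
 * A minimal sketch of the mailbox handshake above, factored out purely for
 * illustration (not part of the driver; it reuses the RREG32/
 * SOC15_REG_OFFSET accessors already visible in this file). The caller
 * would invoke it as mmsch_v1_0_poll_resp(adev, 0x10000002, 1000).
 */
static inline int mmsch_v1_0_poll_resp(struct amdgpu_device *adev,
				       uint32_t ack_bits, unsigned int tries)
{
	uint32_t data;

	while (tries--) {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0,
					       mmVCE_MMSCH_VF_MAILBOX_RESP));
		if ((data & ack_bits) == ack_bits)
			return 0;	/* MMSCH accepted the descriptor */
		udelay(10);
	}

	return -EBUSY;			/* MMSCH never acknowledged */
}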
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t offset, size, tmp;
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v1_0_cmd_end end = { {0} };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
header->version = MMSCH_VERSION;
header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
if (header->vce_table_offset == 0 && header->vce_table_size == 0)
header->uvd_table_offset = header->header_size;
else
header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
init_table += header->uvd_table_offset;
ring = &adev->uvd.ring;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
/* disable power gating */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
0xFFFFFFFF, 0x00000004);
/* mc resume */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.gpu_addr));
offset = size;
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end */
/* disable clock gating */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
/* disable interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
/* stall UMC and register bus before resetting VCPU */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* put LMI, VCPU, RBC etc... into reset */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
(uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
/* initialize UVD memory controller */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
(uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
/* disable byte swapping */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
/* enable VCPU clock */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
/* boot up the VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
/* set the write pointer delay */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
/* set the wb address */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
ring->wptr = 0;
ring = &adev->uvd.ring_enc[0];
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
return -EINVAL; /* already initialized? */
}
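/*
 * For reference, the descriptor built up above in the shared mm table looks
 * roughly like this (a sketch; field names from mmsch_v1_0.h, all sizes and
 * offsets in dwords):
 *
 *	+-------------------------------+  <- mm_table.cpu_addr
 *	| struct mmsch_v1_0_init_header |  header_size
 *	+-------------------------------+  <- uvd_table_offset
 *	| UVD direct wt/rmw/poll cmds   |  uvd_table_size
 *	+-------------------------------+  <- vce_table_offset
 *	| VCE direct wt/rmw/poll cmds   |  vce_table_size
 *	+-------------------------------+
 *
 * Whichever of UVD/VCE initializes first claims the slot right after the
 * header; the other block is appended behind it.
 */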
/**
* uvd_v7_0_start - start UVD block
*
@ -1034,42 +1304,38 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
unsigned eng = ring->idx;
unsigned i;
unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
data1 = lower_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
data1 = lower_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
/* flush TLB */
data0 = (hub->vm_inv_eng0_req + eng) << 2;
data1 = req;
uvd_v7_0_vm_reg_write(ring, data0, data1);
/* flush TLB */
data0 = (hub->vm_inv_eng0_req + eng) << 2;
data1 = req;
uvd_v7_0_vm_reg_write(ring, data0, data1);
/* wait for flush */
data0 = (hub->vm_inv_eng0_ack + eng) << 2;
data1 = 1 << vm_id;
mask = 1 << vm_id;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
}
/* wait for flush */
data0 = (hub->vm_inv_eng0_ack + eng) << 2;
data1 = 1 << vm_id;
mask = 1 << vm_id;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
}
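/*
 * Condensed view of the handshake emitted above (pseudo-register form, same
 * hub fields):
 *
 *	write(hub->ctx0_ptb_addr_hi32 + 2 * vm_id, upper_32_bits(pd_addr));
 *	write(hub->ctx0_ptb_addr_lo32 + 2 * vm_id, lower_32_bits(pd_addr));
 *	wait (hub->ctx0_ptb_addr_lo32 + 2 * vm_id == lower_32_bits(pd_addr));
 *	write(hub->vm_inv_eng0_req + eng, req);           // kick TLB flush
 *	wait (hub->vm_inv_eng0_ack + eng  &  1 << vm_id); // flush done
 *
 * With a per-ring vm_inv_eng the ring only talks to its own hub, which is
 * what let the AMDGPU_MAX_VMHUBS loop go away.
 */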
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
@ -1080,44 +1346,37 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
/* flush TLB */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
amdgpu_ring_write(ring, req);
/* flush TLB */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
amdgpu_ring_write(ring, req);
/* wait for flush */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 1 << vm_id);
amdgpu_ring_write(ring, 1 << vm_id);
}
/* wait for flush */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 1 << vm_id);
amdgpu_ring_write(ring, 1 << vm_id);
}
#if 0
@ -1240,7 +1499,8 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->uvd.ring_enc[0]);
break;
case 120:
amdgpu_fence_process(&adev->uvd.ring_enc[1]);
if (!amdgpu_sriov_vf(adev))
amdgpu_fence_process(&adev->uvd.ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@ -1448,13 +1708,14 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.align_mask = 0xf,
.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
.emit_frame_size =
2 + /* uvd_v7_0_ring_emit_hdp_flush */
2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
34 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_ring_emit_vm_flush */
34 + /* uvd_v7_0_ring_emit_vm_flush */
14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
.emit_ib = uvd_v7_0_ring_emit_ib,
@ -1475,11 +1736,12 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
.emit_frame_size =
17 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_enc_ring_emit_vm_flush */
17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v7_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */

View File

@ -49,63 +49,6 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static inline void mmsch_insert_direct_wt(struct mmsch_v1_0_cmd_direct_write *direct_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t value)
{
direct_wt->cmd_header.reg_offset = reg_offset;
direct_wt->reg_value = value;
memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v1_0_cmd_direct_write));
}
static inline void mmsch_insert_direct_rd_mod_wt(struct mmsch_v1_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t data)
{
direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
direct_rd_mod_wt->mask_value = mask;
direct_rd_mod_wt->write_data = data;
memcpy((void *)init_table, direct_rd_mod_wt,
sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write));
}
static inline void mmsch_insert_direct_poll(struct mmsch_v1_0_cmd_direct_polling *direct_poll,
uint32_t *init_table,
uint32_t reg_offset,
uint32_t mask, uint32_t wait)
{
direct_poll->cmd_header.reg_offset = reg_offset;
direct_poll->mask_value = mask;
direct_poll->wait_value = wait;
memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v1_0_cmd_direct_polling));
}
#define INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
mmsch_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
init_table, (reg), \
(mask), (data)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
}
#define INSERT_DIRECT_WT(reg, value) { \
mmsch_insert_direct_wt(&direct_wt, \
init_table, (reg), \
(value)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
}
#define INSERT_DIRECT_POLL(reg, mask, wait) { \
mmsch_insert_direct_poll(&direct_poll, \
init_table, (reg), \
(mask), (wait)); \
init_table += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
table_size += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
}
/**
* vce_v4_0_ring_get_rptr - get read pointer
*
@ -280,60 +223,73 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
init_table += header->vce_table_offset;
ring = &adev->vce.ring[0];
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), ring->wptr);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), ring->wptr);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), lower_32_bits(ring->gpu_addr));
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
ring->ring_size / 4);
/* start of MC_RESUME */
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), ~(1 << 16), 0);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), ~0xFF9FF000, 0x1FF000);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), ~0x3F, 0x3F);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), adev->vce.gpu_addr >> 8);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), adev->vce.gpu_addr >> 8);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), adev->vce.gpu_addr >> 8);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
adev->vce.gpu_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
adev->vce.gpu_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
adev->vce.gpu_addr >> 8);
}
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & 0x7FFFFFFF);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
offset & 0x7FFFFFFF);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset += size;
size = VCE_V4_0_STACK_SIZE;
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), offset & 0x7FFFFFFF);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
offset & 0x7FFFFFFF);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
offset += size;
size = VCE_V4_0_DATA_SIZE;
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), offset & 0x7FFFFFFF);
INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
offset & 0x7FFFFFFF);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
/* clear BUSY flag */
INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
~VCE_STATUS__JOB_BUSY_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
~VCE_STATUS__JOB_BUSY_MASK, 0);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@ -494,20 +450,9 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (!r) {
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
printk("mm table gpu addr = 0x%llx, cpu addr = %p. \n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
}
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
return r;
}
@ -518,10 +463,7 @@ static int vce_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* free MM table */
if (amdgpu_sriov_vf(adev))
amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
amdgpu_virt_free_mm_table(adev);
r = amdgpu_vce_suspend(adev);
if (r)
@ -973,44 +915,37 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring,
(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
/* flush TLB */
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
amdgpu_ring_write(ring, req);
/* flush TLB */
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
amdgpu_ring_write(ring, req);
/* wait for flush */
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 1 << vm_id);
amdgpu_ring_write(ring, 1 << vm_id);
}
/* wait for flush */
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 1 << vm_id);
amdgpu_ring_write(ring, 1 << vm_id);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@ -1078,12 +1013,13 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
17 * AMDGPU_MAX_VMHUBS + /* vce_v4_0_emit_vm_flush */
17 + /* vce_v4_0_emit_vm_flush */
5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1, /* vce_v4_0_ring_insert_end */
.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */

View File

@ -138,6 +138,12 @@ struct amd_pp_profile {
uint8_t down_hyst;
};
enum amd_fan_ctrl_mode {
AMD_FAN_CTRL_NONE = 0,
AMD_FAN_CTRL_MANUAL = 1,
AMD_FAN_CTRL_AUTO = 2,
};
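/*
 * These values line up with the standard hwmon pwm1_enable convention
 * (0 = no control/full speed, 1 = manual, 2 = automatic), so a sysfs
 * handler can pass them through unchanged; a sketch, assuming the
 * powerplay fan-mode backend is wired up:
 *
 *	return snprintf(buf, PAGE_SIZE, "%i\n",
 *			amdgpu_dpm_get_fan_control_mode(adev));
 */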
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)

View File

@ -53,20 +53,6 @@ enum cgs_ind_reg {
CGS_IND_REG__AUDIO_ENDPT
};
/**
* enum cgs_clock - Clocks controlled by the SMU
*/
enum cgs_clock {
CGS_CLOCK__SCLK,
CGS_CLOCK__MCLK,
CGS_CLOCK__VCLK,
CGS_CLOCK__DCLK,
CGS_CLOCK__ECLK,
CGS_CLOCK__ACLK,
CGS_CLOCK__ICLK,
/* ... */
};
/**
* enum cgs_engine - Engines that can be statically power-gated
*/
@ -81,15 +67,6 @@ enum cgs_engine {
/* ... */
};
/**
* enum cgs_voltage_planes - Voltage planes for external camera HW
*/
enum cgs_voltage_planes {
CGS_VOLTAGE_PLANE__SENSOR0,
CGS_VOLTAGE_PLANE__SENSOR1,
/* ... */
};
/*
* enum cgs_ucode_id - Firmware types for different IPs
*/
@ -146,17 +123,6 @@ enum cgs_resource_type {
CGS_RESOURCE_TYPE_ROM,
};
/**
* struct cgs_clock_limits - Clock limits
*
* Clocks are specified in 10KHz units.
*/
struct cgs_clock_limits {
unsigned min; /**< Minimum supported frequency */
unsigned max; /**< Maximum supported frequency */
unsigned sustainable; /**< Thermally sustainable frequency */
};
/**
* struct cgs_firmware_info - Firmware information
*/
@ -220,54 +186,6 @@ struct cgs_acpi_method_info {
uint32_t padding[9];
};
/**
* cgs_gpu_mem_info() - Return information about memory heaps
* @cgs_device: opaque device handle
* @type: memory type
* @mc_start: Start MC address of the heap (output)
* @mc_size: MC address space size (output)
* @mem_size: maximum amount of memory available for allocation (output)
*
* This function returns information about memory heaps. The type
* parameter is used to select the memory heap. The mc_start and
* mc_size for GART heaps may be bigger than the memory available for
* allocation.
*
* mc_start and mc_size are undefined for non-contiguous FB memory
* types, since buffers allocated with these types may or may not be
* GART mapped.
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gpu_mem_info_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size);
/**
* cgs_gmap_kmem() - map kernel memory to GART aperture
* @cgs_device: opaque device handle
* @kmem: pointer to kernel memory
* @size: size to map
* @min_offset: minimum offset from start of GART aperture
* @max_offset: maximum offset from start of GART aperture
* @kmem_handle: kernel memory handle (output)
* @mcaddr: MC address (output)
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gmap_kmem_t)(struct cgs_device *cgs_device, void *kmem, uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr);
/**
* cgs_gunmap_kmem() - unmap kernel memory
* @cgs_device: opaque device handle
* @kmem_handle: kernel memory handle returned by gmap_kmem
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gunmap_kmem_t)(struct cgs_device *cgs_device, cgs_handle_t kmem_handle);
/**
* cgs_alloc_gpu_mem() - Allocate GPU memory
* @cgs_device: opaque device handle
@ -391,62 +309,6 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum
typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
/**
* cgs_read_pci_config_byte() - Read byte from PCI configuration space
* @cgs_device: opaque device handle
* @addr: address
*
* Return: Value read
*/
typedef uint8_t (*cgs_read_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_word() - Read word from PCI configuration space
* @cgs_device: opaque device handle
* @addr: address, must be word-aligned
*
* Return: Value read
*/
typedef uint16_t (*cgs_read_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_dword() - Read dword from PCI configuration space
* @cgs_device: opaque device handle
* @addr: address, must be dword-aligned
*
* Return: Value read
*/
typedef uint32_t (*cgs_read_pci_config_dword_t)(struct cgs_device *cgs_device,
unsigned addr);
/**
* cgs_write_pci_config_byte() - Write byte to PCI configuration space
* @cgs_device: opaque device handle
* @addr: address
* @value: value to write
*/
typedef void (*cgs_write_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr,
uint8_t value);
/**
* cgs_write_pci_config_word() - Write byte to PCI configuration space
* @cgs_device: opaque device handle
* @addr: address, must be word-aligned
* @value: value to write
*/
typedef void (*cgs_write_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr,
uint16_t value);
/**
* cgs_write_pci_config_dword() - Write byte to PCI configuration space
* @cgs_device: opaque device handle
* @addr: address, must be dword-aligned
* @value: value to write
*/
typedef void (*cgs_write_pci_config_dword_t)(struct cgs_device *cgs_device, unsigned addr,
uint32_t value);
/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
@ -500,87 +362,6 @@ typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsi
typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
unsigned table, void *args);
/**
* cgs_create_pm_request() - Create a power management request
* @cgs_device: opaque device handle
* @request: handle of created PM request (output)
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_create_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t *request);
/**
* cgs_destroy_pm_request() - Destroy a power management request
* @cgs_device: opaque device handle
* @request: handle of created PM request
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_destroy_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request);
/**
* cgs_set_pm_request() - Activate or deactivate a PM request
* @cgs_device: opaque device handle
* @request: PM request handle
* @active: 0 = deactivate, non-0 = activate
*
* While a PM request is active, its minimum clock requests are taken
* into account as the requested engines are powered up. When the
* request is inactive, the engines may be powered down and clocks may
* be lower, depending on other PM requests by other driver
* components.
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_set_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request,
int active);
/**
* cgs_pm_request_clock() - Request a minimum frequency for a specific clock
* @cgs_device: opaque device handle
* @request: PM request handle
* @clock: which clock?
* @freq: requested min. frequency in 10KHz units (0 to clear request)
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_request_clock_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq);
/**
* cgs_pm_request_engine() - Request an engine to be powered up
* @cgs_device: opaque device handle
* @request: PM request handle
* @engine: which engine?
* @powered: 0 = powered down, non-0 = powered up
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_request_engine_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered);
/**
* cgs_pm_query_clock_limits() - Query clock frequency limits
* @cgs_device: opaque device handle
* @clock: which clock?
* @limits: clock limits
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_query_clock_limits_t)(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits);
/**
* cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
* @cgs_device: opaque device handle
* @mask: bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
* @voltages: pointer to array of voltage values in 1mV units
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_set_camera_voltages_t)(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages);
/**
* cgs_get_firmware_info - Get the firmware information from core driver
* @cgs_device: opaque device handle
@ -627,9 +408,6 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
cgs_gmap_kmem_t gmap_kmem;
cgs_gunmap_kmem_t gunmap_kmem;
cgs_alloc_gpu_mem_t alloc_gpu_mem;
cgs_free_gpu_mem_t free_gpu_mem;
cgs_gmap_gpu_mem_t gmap_gpu_mem;
@ -641,27 +419,12 @@ struct cgs_ops {
cgs_write_register_t write_register;
cgs_read_ind_register_t read_ind_register;
cgs_write_ind_register_t write_ind_register;
/* PCI configuration space access */
cgs_read_pci_config_byte_t read_pci_config_byte;
cgs_read_pci_config_word_t read_pci_config_word;
cgs_read_pci_config_dword_t read_pci_config_dword;
cgs_write_pci_config_byte_t write_pci_config_byte;
cgs_write_pci_config_word_t write_pci_config_word;
cgs_write_pci_config_dword_t write_pci_config_dword;
/* PCI resources */
cgs_get_pci_resource_t get_pci_resource;
/* ATOM BIOS */
cgs_atom_get_data_table_t atom_get_data_table;
cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
/* Power management */
cgs_create_pm_request_t create_pm_request;
cgs_destroy_pm_request_t destroy_pm_request;
cgs_set_pm_request_t set_pm_request;
cgs_pm_request_clock_t pm_request_clock;
cgs_pm_request_engine_t pm_request_engine;
cgs_pm_query_clock_limits_t pm_query_clock_limits;
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
cgs_rel_firmware rel_firmware;
@ -696,12 +459,6 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size) \
CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr) \
CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
#define cgs_gunmap_kmem(dev,kmem_handle) \
CGS_CALL(gunmap_kmem,dev,kmem_handle)
#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
#define cgs_free_gpu_mem(dev,handle) \
@ -724,19 +481,6 @@ struct cgs_device
#define cgs_write_ind_register(dev,space,index,value) \
CGS_CALL(write_ind_register,dev,space,index,value)
#define cgs_read_pci_config_byte(dev,addr) \
CGS_CALL(read_pci_config_byte,dev,addr)
#define cgs_read_pci_config_word(dev,addr) \
CGS_CALL(read_pci_config_word,dev,addr)
#define cgs_read_pci_config_dword(dev,addr) \
CGS_CALL(read_pci_config_dword,dev,addr)
#define cgs_write_pci_config_byte(dev,addr,value) \
CGS_CALL(write_pci_config_byte,dev,addr,value)
#define cgs_write_pci_config_word(dev,addr,value) \
CGS_CALL(write_pci_config_word,dev,addr,value)
#define cgs_write_pci_config_dword(dev,addr,value) \
CGS_CALL(write_pci_config_dword,dev,addr,value)
#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
@ -744,20 +488,6 @@ struct cgs_device
#define cgs_atom_exec_cmd_table(dev,table,args) \
CGS_CALL(atom_exec_cmd_table,dev,table,args)
#define cgs_create_pm_request(dev,request) \
CGS_CALL(create_pm_request,dev,request)
#define cgs_destroy_pm_request(dev,request) \
CGS_CALL(destroy_pm_request,dev,request)
#define cgs_set_pm_request(dev,request,active) \
CGS_CALL(set_pm_request,dev,request,active)
#define cgs_pm_request_clock(dev,request,clock,freq) \
CGS_CALL(pm_request_clock,dev,request,clock,freq)
#define cgs_pm_request_engine(dev,request,engine,powered) \
CGS_CALL(pm_request_engine,dev,request,engine,powered)
#define cgs_pm_query_clock_limits(dev,clock,limits) \
CGS_CALL(pm_query_clock_limits,dev,clock,limits)
#define cgs_set_camera_voltages(dev,mask,voltages) \
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
#define cgs_rel_firmware(dev, type) \

View File

@ -251,7 +251,9 @@ static int pp_suspend(void *handle)
ret = pp_check(pp_handle);
if (ret != 0)
if (ret == PP_DPM_DISABLED)
return 0;
else if (ret != 0)
return ret;
eventmgr = pp_handle->eventmgr;

View File

@ -219,7 +219,7 @@ const pem_event_action notify_smu_suspend_tasks[] = {
};
const pem_event_action disable_smc_firmware_ctf_tasks[] = {
/* PEM_Task_DisableSMCFirmwareCTF,*/
pem_task_disable_smc_firmware_ctf,
NULL
};

View File

@ -173,6 +173,11 @@ int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_even
return 0;
}
int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
}
int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
return phm_setup_asic(eventmgr->hwmgr);

View File

@ -84,5 +84,6 @@ int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, str
/*thermal */
int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
#endif /* _EVENT_TASKS_H_ */

View File

@ -501,3 +501,13 @@ int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_i
return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
}
int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
return -EINVAL;
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}

View File

@ -314,52 +314,45 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
param->ulGbFuseTableCksoffM1 =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
param->usGbFuseTableCksoffM2 =
param->ulGbFuseTableCksoffM2 =
le16_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
param->ulGbFuseTableCksoffB =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
param->ulGbFuseTableCksonM1 =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
param->usGbFuseTableCksonM2 =
param->ulGbFuseTableCksonM2 =
le16_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
param->ulGbFuseTableCksonB =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
param->usMaxVoltage025mv =
le16_to_cpu(profile->max_voltage_0_25mv);
param->ucEnableGbVdroopTableCksoff =
profile->enable_gb_vdroop_table_cksoff;
param->ucEnableGbVdroopTableCkson =
profile->enable_gb_vdroop_table_ckson;
param->ucEnableGbFuseTableCksoff =
profile->enable_gb_fuse_table_cksoff;
param->ucEnableGbFuseTableCkson =
profile->enable_gb_fuse_table_ckson;
param->usPsmAgeComfactor =
le16_to_cpu(profile->psm_age_comfactor);
param->ucEnableApplyAvfsCksoffVoltage =
profile->enable_apply_avfs_cksoff_voltage;
param->ulDispclk2GfxclkM1 =
le32_to_cpu(profile->dispclk2gfxclk_a);
param->usDispclk2GfxclkM2 =
param->ulDispclk2GfxclkM2 =
le16_to_cpu(profile->dispclk2gfxclk_b);
param->ulDispclk2GfxclkB =
le32_to_cpu(profile->dispclk2gfxclk_c);
param->ulDcefclk2GfxclkM1 =
le32_to_cpu(profile->dcefclk2gfxclk_a);
param->usDcefclk2GfxclkM2 =
param->ulDcefclk2GfxclkM2 =
le16_to_cpu(profile->dcefclk2gfxclk_b);
param->ulDcefclk2GfxclkB =
le32_to_cpu(profile->dcefclk2gfxclk_c);
param->ulPixelclk2GfxclkM1 =
le32_to_cpu(profile->pixclk2gfxclk_a);
param->usPixelclk2GfxclkM2 =
param->ulPixelclk2GfxclkM2 =
le16_to_cpu(profile->pixclk2gfxclk_b);
param->ulPixelclk2GfxclkB =
le32_to_cpu(profile->pixclk2gfxclk_c);
param->ulPhyclk2GfxclkM1 =
le32_to_cpu(profile->phyclk2gfxclk_a);
param->usPhyclk2GfxclkM2 =
param->ulPhyclk2GfxclkM2 =
le16_to_cpu(profile->phyclk2gfxclk_b);
param->ulPhyclk2GfxclkB =
le32_to_cpu(profile->phyclk2gfxclk_c);
@ -394,3 +387,31 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
return 0;
}
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
struct atom_firmware_info_v3_1 *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
info = (struct atom_firmware_info_v3_1 *)
cgs_atom_get_data_table(hwmgr->device,
ix, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS firmwareinfo!");
return -EINVAL;
}
boot_values->ulRevision = info->firmware_revision;
boot_values->ulGfxClk = info->bootup_sclk_in10khz;
boot_values->ulUClk = info->bootup_mclk_in10khz;
boot_values->ulSocClk = 0;
boot_values->usVddc = info->bootup_vddc_mv;
boot_values->usVddci = info->bootup_vddci_mv;
boot_values->usMvddc = info->bootup_mvddc_mv;
boot_values->usVddGfx = info->bootup_vddgfx_mv;
return 0;
}
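/*
 * Typical use (a sketch, not taken from this patch): fetch the VBIOS
 * boot-up values once at hwmgr init and seed the default state from them.
 *
 *	struct pp_atomfwctrl_bios_boot_up_values boot;
 *
 *	if (!pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot))
 *		data->vbios_boot_state.gfx_clock = boot.ulGfxClk;
 */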

View File

@ -69,7 +69,7 @@ struct pp_atomfwctrl_clock_dividers_soc15 {
struct pp_atomfwctrl_avfs_parameters {
uint32_t ulMaxVddc;
uint32_t ulMinVddc;
uint8_t ucMaxVidStep;
uint32_t ulMeanNsigmaAcontant0;
uint32_t ulMeanNsigmaAcontant1;
uint32_t ulMeanNsigmaAcontant2;
@ -82,30 +82,30 @@ struct pp_atomfwctrl_avfs_parameters {
uint32_t ulGbVdroopTableCksonA0;
uint32_t ulGbVdroopTableCksonA1;
uint32_t ulGbVdroopTableCksonA2;
uint32_t ulGbFuseTableCksoffM1;
uint16_t usGbFuseTableCksoffM2;
uint32_t ulGbFuseTableCksoffB;\
uint32_t ulGbFuseTableCksoffM2;
uint32_t ulGbFuseTableCksoffB;
uint32_t ulGbFuseTableCksonM1;
uint16_t usGbFuseTableCksonM2;
uint32_t ulGbFuseTableCksonM2;
uint32_t ulGbFuseTableCksonB;
uint16_t usMaxVoltage025mv;
uint8_t ucEnableGbVdroopTableCksoff;
uint8_t ucEnableGbVdroopTableCkson;
uint8_t ucEnableGbFuseTableCksoff;
uint8_t ucEnableGbFuseTableCkson;
uint16_t usPsmAgeComfactor;
uint8_t ucEnableApplyAvfsCksoffVoltage;
uint32_t ulDispclk2GfxclkM1;
uint16_t usDispclk2GfxclkM2;
uint32_t ulDispclk2GfxclkM2;
uint32_t ulDispclk2GfxclkB;
uint32_t ulDcefclk2GfxclkM1;
uint16_t usDcefclk2GfxclkM2;
uint32_t ulDcefclk2GfxclkM2;
uint32_t ulDcefclk2GfxclkB;
uint32_t ulPixelclk2GfxclkM1;
uint16_t usPixelclk2GfxclkM2;
uint32_t ulPixelclk2GfxclkM2;
uint32_t ulPixelclk2GfxclkB;
uint32_t ulPhyclk2GfxclkM1;
uint16_t usPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkB;
};
@ -119,6 +119,18 @@ struct pp_atomfwctrl_gpio_parameters {
uint8_t ucFwCtfGpio;
uint8_t ucFwCtfPolarity;
};
struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulRevision;
uint32_t ulGfxClk;
uint32_t ulUClk;
uint32_t ulSocClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
uint16_t usVddGfx;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
@ -136,5 +148,8 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_gpio_parameters *param);
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values);
#endif

View File

@ -4334,26 +4334,31 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (mode) {
/* stop auto-manage */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
smu7_fan_ctrl_set_static_mode(hwmgr, mode);
} else
/* restart auto-manage */
smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
int result = 0;
return 0;
switch (mode) {
case AMD_FAN_CTRL_NONE:
result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
result = smu7_fan_ctrl_set_static_mode(hwmgr, mode);
if (!result)
result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
return result;
}
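/*
 * From userspace the three cases map onto the hwmon pwm1_enable file,
 * e.g. (paths illustrative):
 *
 *	echo 1 > /sys/class/hwmon/hwmonX/pwm1_enable	# AMD_FAN_CTRL_MANUAL
 *	echo 2 > /sys/class/hwmon/hwmonX/pwm1_enable	# AMD_FAN_CTRL_AUTO
 *
 * while AMD_FAN_CTRL_NONE pins the fan at 100% rather than leaving it
 * unmanaged.
 */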
static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
if (hwmgr->fan_ctrl_is_in_default_mode)
return hwmgr->fan_ctrl_default_mode;
else
return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE);
return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
@ -4522,32 +4527,6 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
return 0;
}
static int smu7_request_firmware(struct pp_hwmgr *hwmgr)
{
int ret;
struct cgs_firmware_info info = {0};
ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
&info);
if (ret || !info.kptr)
return -EINVAL;
return 0;
}
static int smu7_release_firmware(struct pp_hwmgr *hwmgr)
{
int ret;
ret = cgs_rel_firmware(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU));
if (ret)
return -EINVAL;
return 0;
}
static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr,
uint32_t *sclk_mask, uint32_t *mclk_mask,
uint32_t min_sclk, uint32_t min_mclk)
@ -4691,10 +4670,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
.dynamic_state_management_disable = smu7_disable_dpm_tasks,
.request_firmware = smu7_request_firmware,
.release_firmware = smu7_release_firmware,
.set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,

View File

@ -112,10 +112,9 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
*/
int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@ -149,7 +148,7 @@ int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}
static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
@ -179,6 +178,7 @@ static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
hwmgr->fan_ctrl_enabled = true;
return result;
}
@ -186,6 +186,7 @@ static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
@ -280,7 +281,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
/**

View File

@ -54,6 +54,6 @@ extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *spe
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif

View File

@ -111,6 +111,8 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
data->registry_data.mclk_dpm_key_disabled =
hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->registry_data.pcie_dpm_key_disabled =
hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
data->registry_data.dcefclk_dpm_key_disabled =
hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
@ -121,7 +123,9 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.enable_tdc_limit_feature = 1;
}
data->registry_data.pcie_dpm_key_disabled = 1;
data->registry_data.clock_stretcher_support =
hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? false : true;
data->registry_data.disable_water_mark = 0;
data->registry_data.fan_control_support = 1;
@ -1133,7 +1137,7 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
int i;
for (i = 0; i < dep_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value !=
if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
dep_table->entries[i].clk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_table->entries[i].clk;
@ -1178,29 +1182,9 @@ static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
else
pcie_table->lclk[i] =
bios_pcie_table->entries[i].pcie_sclk;
pcie_table->count++;
}
if (data->registry_data.pcieSpeedOverride)
pcie_table->pcie_gen[i] = data->registry_data.pcieSpeedOverride;
else
pcie_table->pcie_gen[i] =
bios_pcie_table->entries[bios_pcie_table->count - 1].gen_speed;
if (data->registry_data.pcieLaneOverride)
pcie_table->pcie_lane[i] = data->registry_data.pcieLaneOverride;
else
pcie_table->pcie_lane[i] =
bios_pcie_table->entries[bios_pcie_table->count - 1].lane_width;
if (data->registry_data.pcieClockOverride)
pcie_table->lclk[i] = data->registry_data.pcieClockOverride;
else
pcie_table->lclk[i] =
bios_pcie_table->entries[bios_pcie_table->count - 1].pcie_sclk;
pcie_table->count++;
pcie_table->count = NUM_LINK_LEVELS;
return 0;
}
@ -1290,7 +1274,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.eclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value !=
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].eclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].eclk;
@ -1306,7 +1290,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.vclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value !=
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].vclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].vclk;
@ -1320,7 +1304,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.dclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value !=
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].dclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].dclk;
@ -1432,9 +1416,7 @@ static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
(struct phm_ppt_v2_information *)(hwmgr->pptable);
data->smc_state_table.pp_table.UlvOffsetVid =
(uint8_t)(table_info->us_ulv_voltage_offset *
VOLTAGE_VID_OFFSET_SCALE2 /
VOLTAGE_VID_OFFSET_SCALE1);
(uint8_t)table_info->us_ulv_voltage_offset;
data->smc_state_table.pp_table.UlvSmnclkDid =
(uint8_t)(table_info->us_ulv_smnclk_did);
@ -1553,7 +1535,11 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
current_gfxclk_level->FbMult =
cpu_to_le32(dividers.ulPll_fb_mult);
/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EngineSpreadSpectrumSupport))
current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
else
current_gfxclk_level->SsOn = 0;
current_gfxclk_level->SsFbMult =
cpu_to_le32(dividers.ulPll_ss_fbsmult);
current_gfxclk_level->SsSlewFrac =
@ -2044,10 +2030,10 @@ static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
uint32_t i;
for (i = 0; dep_table->count; i++) {
for (i = 0; i < dep_table->count; i++) {
pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
pp_table->CksVidOffset[i] = convert_to_vid(
dep_table->entries[i].cks_voffset);
pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
}
return 0;
@ -2073,66 +2059,70 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
pp_table->MaxVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->BtcGbVdroopTableCksOn.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
pp_table->BtcGbVdroopTableCksOn.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
pp_table->BtcGbVdroopTableCksOn.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
pp_table->MaxVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
pp_table->BtcGbVdroopTableCksOff.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
pp_table->BtcGbVdroopTableCksOn.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
pp_table->BtcGbVdroopTableCksOn.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
pp_table->BtcGbVdroopTableCksOn.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
pp_table->AvfsGbCksOn.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
pp_table->AvfsGbCksOn.m2 =
cpu_to_le16(avfs_params.usGbFuseTableCksonM2);
cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
pp_table->AvfsGbCksOn.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
pp_table->AvfsGbCksOn.m1_shift = 24;
pp_table->AvfsGbCksOn.m2_shift = 12;
pp_table->AvfsGbCksOn.b_shift = 0;
pp_table->OverrideAvfsGbCksOn =
avfs_params.ucEnableGbFuseTableCkson;
pp_table->AvfsGbCksOff.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
pp_table->AvfsGbCksOff.m2 =
cpu_to_le16(avfs_params.usGbFuseTableCksoffM2);
cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
pp_table->AvfsGbCksOff.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
pp_table->AvfsGbCksOff.m1_shift = 24;
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
pp_table->AConstant[0] =
cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
pp_table->AConstant[1] =
cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
pp_table->AConstant[2] =
cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
pp_table->DC_tol_sigma =
cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
pp_table->Platform_mean =
cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
pp_table->PSM_Age_CompFactor =
cpu_to_le16(avfs_params.usPsmAgeComfactor);
pp_table->Platform_sigma =
cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
for (i = 0; i < dep_table->count; i++)
pp_table->StaticVoltageOffsetVid[i] = (uint8_t)
(dep_table->entries[i].sclk_offset *
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].sclk_offset == 0)
pp_table->StaticVoltageOffsetVid[i] = 248;
else
pp_table->StaticVoltageOffsetVid[i] =
(uint8_t)(dep_table->entries[i].sclk_offset *
VOLTAGE_VID_OFFSET_SCALE2 /
VOLTAGE_VID_OFFSET_SCALE1);
pp_table->OverrideBtcGbCksOn =
avfs_params.ucEnableGbVdroopTableCkson;
pp_table->OverrideAvfsGbCksOn =
avfs_params.ucEnableGbFuseTableCkson;
}
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
@ -2141,20 +2131,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)data->disp_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
(int16_t)data->disp_clk_quad_eqn_b;
(int32_t)data->disp_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)data->disp_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)avfs_params.ulDispclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
(int16_t)avfs_params.usDispclk2GfxclkM2;
(int32_t)avfs_params.ulDispclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)avfs_params.ulDispclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->dcef_clk_quad_eqn_a) &&
@ -2163,20 +2154,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)data->dcef_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
(int16_t)data->dcef_clk_quad_eqn_b;
(int32_t)data->dcef_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)data->dcef_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)avfs_params.ulDcefclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
(int16_t)avfs_params.usDcefclk2GfxclkM2;
(int32_t)avfs_params.ulDcefclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)avfs_params.ulDcefclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->pixel_clk_quad_eqn_a) &&
@ -2185,21 +2177,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)data->pixel_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
(int16_t)data->pixel_clk_quad_eqn_b;
(int32_t)data->pixel_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)data->pixel_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)avfs_params.ulPixelclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
(int16_t)avfs_params.usPixelclk2GfxclkM2;
(int32_t)avfs_params.ulPixelclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)avfs_params.ulPixelclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->phy_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
@ -2207,20 +2199,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)data->phy_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
(int16_t)data->phy_clk_quad_eqn_b;
(int32_t)data->phy_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)data->phy_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)avfs_params.ulPhyclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
(int16_t)avfs_params.usPhyclk2GfxclkM2;
(int32_t)avfs_params.ulPhyclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)avfs_params.ulPhyclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
} else {
data->smu_features[GNLD_AVFS].supported = false;
}
@ -2309,6 +2302,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@ -2331,6 +2325,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
pp_table->Mp0DpmVoltageMode =
(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
@ -2372,14 +2367,31 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to initialize UVD Level!",
return result);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
if (data->registry_data.clock_stretcher_support) {
result = vega10_populate_clock_stretcher_table(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to populate Clock Stretcher Table!",
return result);
}
result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
if (!result) {
data->vbios_boot_state.vddc = boot_up_values.usVddc;
data->vbios_boot_state.vddci = boot_up_values.usVddci;
data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
if (0 != boot_up_values.usVddc) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFloorSocVoltage,
(boot_up_values.usVddc * 4));
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
}
result = vega10_populate_avfs_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize AVFS Parameters!",
@ -2404,35 +2416,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
if (data->smu_features[GNLD_AVFS].supported) {
uint32_t features_enabled;
result = vega10_get_smc_features(hwmgr->smumgr, &features_enabled);
PP_ASSERT_WITH_CODE(!result,
"Failed to Retrieve Enabled Features!",
return result);
if (!(features_enabled & (1 << FEATURE_AVFS_BIT))) {
result = vega10_perform_btc(hwmgr->smumgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to Perform BTC!",
result = vega10_avfs_enable(hwmgr, true);
PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
result = vega10_avfs_enable(hwmgr, true);
PP_ASSERT_WITH_CODE(!result,
"Attempt to enable AVFS feature Failed!",
return result);
result = vega10_save_vft_table(hwmgr->smumgr,
(uint8_t *)&(data->smc_state_table.avfs_table));
PP_ASSERT_WITH_CODE(!result,
"Attempt to save VFT table Failed!",
return result);
} else {
data->smu_features[GNLD_AVFS].enabled = true;
result = vega10_restore_vft_table(hwmgr->smumgr,
(uint8_t *)&(data->smc_state_table.avfs_table));
PP_ASSERT_WITH_CODE(!result,
"Attempt to restore VFT table Failed!",
return result;);
}
}
return 0;
}
@ -2457,6 +2443,26 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
return 0;
}
static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_THERMAL].supported) {
if (!data->smu_features[GNLD_THERMAL].enabled)
pr_info("THERMAL Feature Already disabled!");
PP_ASSERT_WITH_CODE(
!vega10_enable_smc_features(hwmgr->smumgr,
false,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"disable THERMAL Feature Failed!",
return -1);
data->smu_features[GNLD_THERMAL].enabled = false;
}
return 0;
}
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
@ -2535,6 +2541,37 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
uint32_t i, feature_mask = 0;
if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap & bitmap) {
if (data->smu_features[i].supported) {
if (data->smu_features[i].enabled) {
feature_mask |= data->smu_features[i].
smu_feature_bitmap;
data->smu_features[i].enabled = false;
}
}
}
}
vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
return 0;
}
/**
* @brief Tell SMC to enable the supported DPMs.
*
@ -2576,6 +2613,12 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
if (data->vbios_boot_state.bsoc_vddc_lock) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFloorSocVoltage, 0);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
@ -2602,8 +2645,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to configure telemetry!",
return tmp_result);
vega10_set_tools_address(hwmgr->smumgr);
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_NumOfDisplays, 0);
@ -3880,32 +3921,36 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (mode) {
/* stop auto-manage */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
vega10_fan_ctrl_set_static_mode(hwmgr, mode);
} else
/* restart auto-manage */
vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
int result = 0;
return 0;
switch (mode) {
case AMD_FAN_CTRL_NONE:
result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
if (!result)
result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
return result;
}
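For reference, a minimal sketch of how the three modes above line up with the pwm1_enable sysfs interface this series refines; the numeric mapping (0 = none, 1 = manual, 2 = auto) and the helper name are assumptions for illustration, not the exact driver plumbing:

/* Illustrative only: route a user-written pwm1_enable value into
 * vega10_set_fan_control_mode(). Assumes AMD_FAN_CTRL_NONE/MANUAL/AUTO
 * carry the usual 0/1/2 sysfs encoding.
 */
static int example_set_pwm1_enable(struct pp_hwmgr *hwmgr, long value)
{
	switch (value) {
	case 0: /* fan pinned at 100% */
		return vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
	case 1: /* firmware fan control stopped, user sets duty */
		return vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_MANUAL);
	case 2: /* firmware fan control (re)started */
		return vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
	default:
		return -EINVAL;
	}
}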
static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
if (hwmgr->fan_ctrl_is_in_default_mode) {
return hwmgr->fan_ctrl_default_mode;
} else {
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
return (cgs_read_register(hwmgr->device, reg) &
CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
}
if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
return AMD_FAN_CTRL_MANUAL;
else
return AMD_FAN_CTRL_AUTO;
}
static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
@ -4148,55 +4193,56 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
break;
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
data->smc_state_table.gfx_boot_level = i;
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
i),
"Failed to set soft min sclk index!",
return -1);
for (i = 31; i >= 0; i--) {
if (mask & (1 << i))
break;
}
data->smc_state_table.gfx_max_level = i;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -EINVAL);
break;
case PP_MCLK:
if (data->registry_data.mclk_dpm_key_disabled)
break;
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
i),
"Failed to set soft min mclk index!",
return -1);
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
data->smc_state_table.mem_boot_level = i;
for (i = 31; i >= 0; i--) {
if (mask & (1 << i))
break;
}
data->smc_state_table.mem_max_level = i;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -EINVAL);
break;
case PP_PCIE:
if (data->registry_data.pcie_dpm_key_disabled)
break;
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_SetMinLinkDpmByIndex,
i),
"Failed to set min pcie index!",
return -1);
break;
default:
break;
}
@ -4395,11 +4441,55 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
return is_update_required;
}
static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
PP_ASSERT_WITH_CODE(tmp_result == 0,
"DPM is not running right now, no need to disable DPM!",
return 0);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
tmp_result = vega10_disable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable power containment!", result = tmp_result);
tmp_result = vega10_avfs_enable(hwmgr, false);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable AVFS!", result = tmp_result);
tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
return result;
}
static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int result;
result = vega10_disable_dpm_tasks(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"[disable_dpm_tasks] Failed to disable DPM!",
);
data->water_marks_bitmap &= ~(WaterMarksLoaded);
return result;
}
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
.asic_setup = vega10_setup_asic_task,
.dynamic_state_management_enable = vega10_enable_dpm_tasks,
.dynamic_state_management_disable = vega10_disable_dpm_tasks,
.get_num_of_pp_table_entries =
vega10_get_number_of_powerplay_table_entries,
.get_power_state_size = vega10_get_power_state_size,
@ -4439,6 +4529,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.check_states_equal = vega10_check_states_equal,
.check_smc_update_required_for_display_configuration =
vega10_check_smc_update_required_for_display_configuration,
.power_off_asic = vega10_power_off_asic,
.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)

View File

@ -177,8 +177,11 @@ struct vega10_dpmlevel_enable_mask {
};
struct vega10_vbios_boot_state {
bool bsoc_vddc_lock;
uint16_t vddc;
uint16_t vddci;
uint16_t mvddc;
uint16_t vdd_gfx;
uint32_t gfx_clock;
uint32_t mem_clock;
uint32_t soc_clock;

View File

@ -48,8 +48,8 @@ void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1);
table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2);
table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx);
table->LoadLineResistance = cpu_to_le16(
hwmgr->platform_descriptor.LoadLineSlope);
table->LoadLineResistance =
hwmgr->platform_descriptor.LoadLineSlope * 256;
table->FitLimit = 0; /* Not used for Vega10 */
table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address;
@ -113,6 +113,29 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
}
return 0;
}
static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{

View File

@ -60,6 +60,7 @@ int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr);
int vega10_enable_power_containment(struct pp_hwmgr *hwmgr);
int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_POWERTUNE_H_ */

View File

@ -407,7 +407,7 @@ static int get_tdp_table(
tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
hwmgr->platform_descriptor.LoadLineSlope = le16_to_cpu(power_tune_table->usLoadLineResistance);
} else {
power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
@ -453,7 +453,7 @@ static int get_tdp_table(
tdp_table->ucPlx_I2C_LineSDA = sda;
hwmgr->platform_descriptor.LoadLineSlope =
power_tune_table_v2->usLoadLineResistance;
le16_to_cpu(power_tune_table_v2->usLoadLineResistance);
}
*info_tdp_table = tdp_table;

View File

@ -381,14 +381,10 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
temp = cgs_read_register(hwmgr->device, reg);
temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
temp = (temp & CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT;
/* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200)
temp = VEGA10_THERMAL_MAXIMUM_TEMP_READING;
else
temp = temp & 0x1ff;
temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
@ -424,23 +420,28 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
val = cgs_read_register(hwmgr->device, reg);
val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT;
val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
val |= (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT;
val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK);
val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT);
val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK);
val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT);
val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
<< THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT);
val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
<< THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
cgs_write_register(hwmgr->device, reg, val);
reg = soc15_get_register_offset(THM_HWID, 0,
mmTHM_TCON_HTC_BASE_IDX, mmTHM_TCON_HTC);
val = cgs_read_register(hwmgr->device, reg);
val &= ~(THM_TCON_HTC__HTC_TMP_LMT_MASK);
val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
THM_TCON_HTC__HTC_TMP_LMT__SHIFT;
cgs_write_register(hwmgr->device, reg, val);
return 0;
}
@ -482,18 +483,28 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
uint32_t val = 0;
uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n");
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
true,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to Enable FW CTF feature Failed!",
return -1);
data->smu_features[GNLD_FW_CTF].enabled = true;
}
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
true,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to Enable FW CTF feature Failed!",
return -1);
data->smu_features[GNLD_FW_CTF].enabled = true;
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
cgs_write_register(hwmgr->device, reg, val);
return 0;
}
@ -501,21 +512,27 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
* Disable thermal alerts on the Vega10 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
static int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (!data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already disabled!\n");
}
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
false,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to disable FW CTF feature Failed!",
return -1);
data->smu_features[GNLD_FW_CTF].enabled = false;
data->smu_features[GNLD_FW_CTF].enabled = false;
}
reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
cgs_write_register(hwmgr->device, reg, 0);
return 0;
}
@ -561,6 +578,11 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
table->FanTargetTemperature = hwmgr->thermal_controller.
advanceFanControlParameters.usTMax;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature);
table->FanPwmMin = hwmgr->thermal_controller.
advanceFanControlParameters.usPWMMin * 255 / 100;
table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller.

View File

@ -78,6 +78,8 @@ extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif

View File

@ -431,6 +431,6 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
#endif /* _HARDWARE_MANAGER_H_ */

View File

@ -368,11 +368,10 @@ struct pp_hwmgr_func {
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size);
int (*request_firmware)(struct pp_hwmgr *hwmgr);
int (*release_firmware)(struct pp_hwmgr *hwmgr);
int (*set_power_profile_state)(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@ -765,6 +764,7 @@ struct pp_hwmgr {
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
uint32_t fan_ctrl_default_mode;
bool fan_ctrl_enabled;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
uint32_t ps_size;

View File

@ -30,7 +30,7 @@
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
#define SMU9_DRIVER_IF_VERSION 0xB
#define SMU9_DRIVER_IF_VERSION 0xD
#define PPTABLE_V10_SMU_VERSION 1
@ -302,7 +302,17 @@ typedef struct {
uint32_t DpmLevelPowerDelta;
uint32_t Reserved[19];
uint8_t EnableBoostState;
uint8_t AConstant_Shift;
uint8_t DC_tol_sigma_Shift;
uint8_t PSM_Age_CompFactor_Shift;
uint16_t BoostStartTemperature;
uint16_t BoostStopTemperature;
PllSetting_t GfxBoostState;
uint32_t Reserved[14];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
@ -464,4 +474,8 @@ typedef struct {
#define DB_PCC_SHIFT 26
#define DB_EDC_SHIFT 27
#define REMOVE_FMAX_MARGIN_BIT 0x0
#define REMOVE_DCTOL_MARGIN_BIT 0x1
#define REMOVE_PLATFORM_MARGIN_BIT 0x2
#endif

View File

@ -122,7 +122,10 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetFanMinPwm 0x52
#define PPSMC_MSG_ConfigureGfxDidt 0x55
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_Message_Count 0x57
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
#define PPSMC_Message_Count 0x5A
typedef int PPSMC_Msg;

View File

@ -74,18 +74,18 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
/**
* Check if SMC has responded to previous message.
*
* @param smumgr the address of the powerplay hardware manager.
* @return TRUE SMC has responded, FALSE otherwise.
*/
/*
* Check if SMC has responded to previous message.
*
* @param smumgr the address of the powerplay hardware manager.
* @return TRUE SMC has responded, FALSE otherwise.
*/
static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
return -1;
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
@ -96,20 +96,19 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
return cgs_read_register(smumgr->device, reg);
}
/**
* Send a message to the SMC, and do not wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
/*
* Send a message to the SMC, and do not wait for its response.
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
uint16_t msg)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
return -1;
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
@ -118,19 +117,18 @@ int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
return 0;
}
/**
* Send a message to the SMC, and wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return The response that came from the SMC.
*/
/*
* Send a message to the SMC, and wait for its response.
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
return -1;
return -EINVAL;
vega10_wait_for_response(smumgr);
@ -140,19 +138,18 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
vega10_send_msg_to_smc_without_waiting(smumgr, msg);
PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1,
"Failed to send Message.",
return -1);
if (vega10_wait_for_response(smumgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
}
/**
/*
* Send a message to the SMC with parameter
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
* @return Always return 0.
*/
int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint16_t msg, uint32_t parameter)
@ -160,7 +157,7 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
return -1;
return -EINVAL;
vega10_wait_for_response(smumgr);
@ -174,22 +171,20 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
vega10_send_msg_to_smc_without_waiting(smumgr, msg);
PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1,
"Failed to send Message.",
return -1);
if (vega10_wait_for_response(smumgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
}
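All of the send/read helpers in this file follow one MP1 mailbox handshake. A condensed sketch of that sequence, assuming only the registers visible in this hunk (C2PMSG_90 as the response mailbox, C2PMSG_66 as the message mailbox; the parameter mailbox is left out because it is not shown here):

/* Illustrative handshake, not the driver's exact code path:
 * clear response mailbox -> write message ID -> poll for response == 1.
 */
static int example_smc_handshake(struct pp_smumgr *smumgr, uint16_t msg)
{
	uint32_t resp = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
	uint32_t mbox = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);

	cgs_write_register(smumgr->device, resp, 0);	/* arm the response mailbox */
	cgs_write_register(smumgr->device, mbox, msg);	/* kick the SMC */
	while (cgs_read_register(smumgr->device, resp) == 0)
		;	/* the real code bounds this wait instead of spinning forever */
	return cgs_read_register(smumgr->device, resp) == 1 ? 0 : -EINVAL;
}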
/**
* Send a message to the SMC with parameter, do not wait for response
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
/*
* Send a message to the SMC with parameter, do not wait for response
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int vega10_send_msg_to_smc_with_parameter_without_waiting(
struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
@ -202,13 +197,12 @@ int vega10_send_msg_to_smc_with_parameter_without_waiting(
return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
}
/**
* Retrieve an argument from SMC.
*
* @param smumgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always return 0.
*/
/*
* Retrieve an argument from SMC.
* @param smumgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always return 0.
*/
int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
{
uint32_t reg;
@ -221,11 +215,11 @@ int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
return 0;
}
/**
* Copy table from SMC into driver FB
* @param smumgr the address of the SMC manager
* @param table_id the driver's table ID to copy from
*/
/*
* Copy table from SMC into driver FB
* @param smumgr the address of the SMC manager
* @param table_id the driver's table ID to copy from
*/
int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
uint8_t *table, int16_t table_id)
{
@ -233,25 +227,25 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
(struct vega10_smumgr *)(smumgr->backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -1;);
"Invalid SMU Table ID!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
"Invalid SMU Table version!", return -1;);
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -1;);
"Invalid SMU Table Length!", return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -1;);
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -1;);
return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -1;);
return -EINVAL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@ -259,11 +253,11 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
/**
* Copy table from Driver FB into SMC
* @param smumgr the address of the SMC manager
* @param table_id the table to copy from
*/
/*
* Copy table from Driver FB into SMC
* @param smumgr the address of the SMC manager
* @param table_id the table to copy from
*/
int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
uint8_t *table, int16_t table_id)
{
@ -271,11 +265,11 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
(struct vega10_smumgr *)(smumgr->backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -1;);
"Invalid SMU Table ID!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
"Invalid SMU Table version!", return -1;);
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -1;);
"Invalid SMU Table Length!", return -EINVAL);
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
@ -284,35 +278,26 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -1;);
return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -1;);
return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return -1;);
return -EINVAL);
return 0;
}
int vega10_perform_btc(struct pp_smumgr *smumgr)
{
PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc_with_parameter(
smumgr, PPSMC_MSG_RunBtc, 0),
"Attempt to run DC BTC Failed!",
return -1);
return 0;
}
int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -1);
return -EINVAL);
return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
}
@ -321,7 +306,7 @@ int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -1);
return -EINVAL);
return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
}
@ -339,13 +324,16 @@ int vega10_enable_smc_features(struct pp_smumgr *smumgr,
int vega10_get_smc_features(struct pp_smumgr *smumgr,
uint32_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
if (!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
if (!vega10_read_arg_from_smc(smumgr, features_enabled))
return 0;
vega10_read_arg_from_smc(smumgr, features_enabled);
return 0;
}
return -1;
return -EINVAL;
}
int vega10_set_tools_address(struct pp_smumgr *smumgr)
@ -372,25 +360,20 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -1);
PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(smumgr,
&smc_driver_if_version),
"Attempt to read SMC IF Version Number Failed!",
return -1);
return -EINVAL);
vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION)
return -1;
if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
pr_err("Your firmware(0x%x) doesn't match \
SMU9_DRIVER_IF_VERSION(0x%x). \
Please update your firmware!\n",
smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
return -EINVAL;
}
return 0;
}
/**
* Write a 32bit value to the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
*/
static int vega10_smu_init(struct pp_smumgr *smumgr)
{
struct vega10_smumgr *priv;
@ -427,7 +410,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
return -1);
return -EINVAL);
priv->smu_tables.entry[PPTABLE].version = 0x01;
priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
@ -455,7 +438,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
return -1);
return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
@ -485,7 +468,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
return -1);
return -EINVAL);
priv->smu_tables.entry[AVFSTABLE].version = 0x01;
priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
@ -497,7 +480,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[AVFSTABLE].table = kaddr;
priv->smu_tables.entry[AVFSTABLE].handle = handle;
tools_size = 0;
tools_size = 0x19000;
if (tools_size) {
smu_allocate_memory(smumgr->device,
tools_size,
@ -517,9 +500,44 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
smu_lower_32_bits(mc_addr);
priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
priv->smu_tables.entry[TOOLSTABLE].handle = handle;
vega10_set_tools_address(smumgr);
}
}
/* allocate space for AVFS Fuse table */
smu_allocate_memory(smumgr->device,
sizeof(AvfsFuseOverride_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&kaddr,
&handle);
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs fuse table.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
return -EINVAL);
priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high =
smu_upper_32_bits(mc_addr);
priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low =
smu_lower_32_bits(mc_addr);
priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
return 0;
}
@ -538,6 +556,8 @@ static int vega10_smu_fini(struct pp_smumgr *smumgr)
if (priv->smu_tables.entry[TOOLSTABLE].table)
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
kfree(smumgr->backend);
smumgr->backend = NULL;
}
@ -548,7 +568,7 @@ static int vega10_start_smu(struct pp_smumgr *smumgr)
{
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
"Failed to verify SMC interface!",
return -1);
return -EINVAL);
return 0;
}

View File

@ -30,6 +30,7 @@ enum smu_table_id {
WMTABLE,
AVFSTABLE,
TOOLSTABLE,
AVFSFUSETABLE,
MAX_SMU_TABLE,
};
@ -62,7 +63,6 @@ int vega10_get_smc_features(struct pp_smumgr *smumgr,
uint32_t *features_enabled);
int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
int vega10_perform_btc(struct pp_smumgr *smumgr);
int vega10_set_tools_address(struct pp_smumgr *smumgr);

View File

@ -236,6 +236,23 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
struct amd_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
return false;
if (fence->context == entity->fence_context)
return true;
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched)
return true;
return false;
}
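In short, amd_sched_dependency_optimized() returns true only for a still-pending fence whose completion is already guaranteed by scheduling order — one from the entity's own fence context or emitted by the same scheduler — letting callers skip installing an explicit wait; NULL or already-signalled fences return false, since there is nothing left to optimize.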
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
@ -387,7 +404,9 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->s_fence->cb)) {
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
@ -460,9 +479,9 @@ int amd_sched_job_init(struct amd_sched_job *job,
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
job->id = atomic64_inc_return(&sched->job_id_count);
if (!job->s_fence)
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
INIT_WORK(&job->finish_work, amd_sched_job_finish);
INIT_LIST_HEAD(&job->node);

View File

@ -158,4 +158,6 @@ int amd_sched_job_init(struct amd_sched_job *job,
void *owner);
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity);
#endif

View File

@ -80,6 +80,8 @@
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure {
struct drm_connector *connector;
@ -122,6 +124,9 @@ static const struct edid_quirk {
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@ -4244,6 +4249,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
if (quirks & EDID_QUIRK_FORCE_10BPC)
connector->display_info.bpc = 10;
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
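Quirk lookup keys on the panel's 3-letter PNP vendor ID plus its EDID product code (764 here is 0x02fc for the LGD panel above), and because the FORCE_*BPC checks run in ascending order, the highest force flag present is the one that sticks.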

View File

@ -87,3 +87,16 @@ config DRM_I915_LOW_LEVEL_TRACEPOINTS
and also analyze the request dependency resolving timeline.
If in doubt, say "N".
config DRM_I915_DEBUG_VBLANK_EVADE
bool "Enable extra debug warnings for vblank evasion"
depends on DRM_I915
default n
help
Choose this option to turn on extra debug warnings for the
vblank evade mechanism. This gives a warning every time the
deadline allotted for the vblank evade critical section
is exceeded, even if there isn't an actual risk of missing
the vblank.
If in doubt, say "N".
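Kernels built with CONFIG_DRM_I915_DEBUG_VBLANK_EVADE=y get the DRM_WARN in the hunk below whenever an atomic update overruns VBLANK_EVASION_TIME_US; with the option off (the default), only updates that actually miss the vblank window are reported.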

View File

@ -198,12 +198,15 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
} else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
VBLANK_EVASION_TIME_US)
}
#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
VBLANK_EVASION_TIME_US)
DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(pipe),
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
VBLANK_EVASION_TIME_US);
#endif
}
static void

View File

@ -831,8 +831,7 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh,
u32 pflip_flags)
struct nv50_head_atom *asyh)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
@ -848,7 +847,10 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
asyw->image.h = fb->base.height;
asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
asyw->interval = pflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 0 : 1;
if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
asyw->interval = 0;
else
asyw->interval = 1;
if (asyw->image.kind) {
asyw->image.layout = 0;
@ -887,7 +889,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
struct nv50_head_atom *harm = NULL, *asyh = NULL;
bool varm = false, asyv = false, asym = false;
int ret;
u32 pflip_flags = 0;
NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
if (asyw->state.crtc) {
@ -896,7 +897,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
return PTR_ERR(asyh);
asym = drm_atomic_crtc_needs_modeset(&asyh->state);
asyv = asyh->state.active;
pflip_flags = asyh->state.pageflip_flags;
}
if (armw->state.crtc) {
@ -912,12 +912,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
asyw->set.point = true;
if (!varm || asym || armw->state.fb != asyw->state.fb) {
ret = nv50_wndw_atomic_check_acquire(
wndw, asyw, asyh, pflip_flags);
if (ret)
return ret;
}
ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
if (ret)
return ret;
} else
if (varm) {
nv50_wndw_atomic_check_release(wndw, asyw, harm);
@ -1122,9 +1119,13 @@ static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
struct nv50_wndw_atom *asyw)
{
asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
asyh->curs.offset = asyw->image.offset;
asyh->set.curs = asyh->curs.visible;
u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
u32 offset = asyw->image.offset;
if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
}
}
static void

View File

@ -295,7 +295,7 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
RB_CLEAR_NODE(&object->node);
WARN_ON(oclass->engine && !object->engine);
WARN_ON(IS_ERR(object->engine));
}
int

View File

@ -638,7 +638,6 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
return ret;
}
ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
return 0;
}

View File

@ -146,7 +146,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
poll = false;
}
if (list_empty(&therm->alarm.head) && poll)
if (poll)
nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
spin_unlock_irqrestore(&therm->lock, flags);

View File

@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
if (list_empty(&fan->alarm.head) && target != duty) {
if (target != duty) {
u16 bump_period = fan->bios.bump_period;
u16 slow_down_period = fan->bios.slow_down_period;
u64 delay;

View File

@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
if (percent != (duty * 100)) {
u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
next_change = fan->period_us - next_change;

View File

@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
if (therm->func->temp_get(therm) >= 0)
nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}

View File

@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
unsigned long flags;
LIST_HEAD(exec);
/* move any due alarms off the pending list */
/* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
if (alarm->timestamp <= nvkm_timer_read(tmr))
list_move_tail(&alarm->head, &exec);
/* Have we hit the earliest alarm that hasn't gone off? */
if (alarm->timestamp > nvkm_timer_read(tmr)) {
/* Schedule it. If we didn't race, we're done. */
tmr->func->alarm_init(tmr, alarm->timestamp);
if (alarm->timestamp > nvkm_timer_read(tmr))
break;
}
/* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself.
*/
list_move_tail(&alarm->head, &exec);
}
/* reschedule interrupt for next alarm time */
if (!list_empty(&tmr->alarms)) {
alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
tmr->func->alarm_init(tmr, alarm->timestamp);
} else {
/* Shut down interrupt if no more pending alarms. */
if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
}
spin_unlock_irqrestore(&tmr->lock, flags);
/* execute any pending alarm handlers */
/* Execute completed callbacks. */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
list_del_init(&alarm->head);
alarm->func(alarm);
@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
 	struct nvkm_alarm *list;
 	unsigned long flags;
 
-	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-	/* append new alarm to list, in soonest-alarm-first order */
+	/* Remove alarm from pending list.
+	 *
+	 * This both protects against the corruption of the list,
+	 * and implements alarm rescheduling/cancellation.
+	 */
 	spin_lock_irqsave(&tmr->lock, flags);
-	if (!nsec) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
+	list_del_init(&alarm->head);
+
+	if (nsec) {
+		/* Insert into pending list, ordered earliest to latest. */
+		alarm->timestamp = nvkm_timer_read(tmr) + nsec;
 		list_for_each_entry(list, &tmr->alarms, head) {
 			if (list->timestamp > alarm->timestamp)
 				break;
 		}
+
 		list_add_tail(&alarm->head, &list->head);
+
+		/* Update HW if this is now the earliest alarm. */
+		list = list_first_entry(&tmr->alarms, typeof(*list), head);
+		if (list == alarm) {
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			/* This shouldn't happen if callers aren't stupid.
+			 *
+			 * Worst case scenario is that it'll take roughly
+			 * 4 seconds for the next alarm to trigger.
+			 */
+			WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+		}
 	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
-
-	/* process pending alarms */
-	nvkm_timer_alarm_trigger(tmr);
 }
 
 void
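
Taken together, the two timer/base.c hunks make nvkm_timer_alarm() the single place that manipulates the pending list: the alarm is unlinked first, then re-inserted in timestamp order, and the hardware is reprogrammed only when the new alarm becomes the list head. A userspace sketch of that insert-and-reprogram pattern (the singly linked list and hw_program() are invented for illustration):

/* Sketch, assuming a sorted singly linked pending list: the comparator
 * is touched only when the earliest deadline actually changes.
 */
#include <stdio.h>

struct alarm {
	unsigned long long timestamp;
	struct alarm *next;
};

static struct alarm *pending;	/* sorted, earliest first */

static void hw_program(unsigned long long deadline)
{
	printf("program comparator for t=%llu\n", deadline);
}

static void schedule_alarm(struct alarm *a)
{
	struct alarm **p = &pending;

	/* Walk to the first entry that expires later than us. */
	while (*p && (*p)->timestamp <= a->timestamp)
		p = &(*p)->next;
	a->next = *p;
	*p = a;

	/* Only a new earliest deadline requires reprogramming. */
	if (pending == a)
		hw_program(a->timestamp);
}

int main(void)
{
	struct alarm a = { 100, NULL }, b = { 50, NULL }, c = { 200, NULL };

	schedule_alarm(&a);	/* becomes head: programs t=100 */
	schedule_alarm(&b);	/* new head: programs t=50 */
	schedule_alarm(&c);	/* not head: no HW access at all */
	return 0;
}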


@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
 	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nvkm_timer_alarm_trigger(tmr);
 		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
 		stat &= ~0x00000001;
 	}
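
Swapping the two lines above changes the handler from process-then-ack to ack-then-process. If an alarm callback reschedules an alarm that fires while callbacks are still running, a late ack would clear the freshly latched interrupt and the alarm would be lost; an early ack costs at most one spurious pass through the trigger path. A toy model (the latch flag is an invented stand-in for NV04_PTIMER_INTR_0):

/* Toy model of interrupt latching, simplified for illustration. */
#include <stdio.h>
#include <stdbool.h>

static bool irq_latch;

static void process_alarms(void)
{
	irq_latch = true;	/* a handler reschedules an immediate alarm */
}

int main(void)
{
	/* Buggy order: process, then ack. The reschedule is wiped out. */
	irq_latch = true;
	process_alarms();
	irq_latch = false;	/* the ack clears the fresh latch too */
	printf("process-then-ack leaves latch: %d (alarm lost)\n", irq_latch);

	/* Fixed order: ack, then process. The reschedule survives. */
	irq_latch = true;
	irq_latch = false;	/* ack only the latch we are handling */
	process_alarms();
	printf("ack-then-process leaves latch: %d (ISR re-entered)\n",
	       irq_latch);
	return 0;
}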


@@ -9150,23 +9150,10 @@ static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
+
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -9274,14 +9261,14 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 {
 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
 	struct dce8_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
@@ -9297,7 +9284,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -9337,7 +9324,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
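
Both dce8 hunks replace arithmetic built on a truncated per-pixel period with direct scaling by the mode clock. Since pixel_period = 1000000 / mode->clock is integer division with the clock in kHz, any fractional nanosecond is discarded and then multiplied by htotal or hdisplay, inflating the error. A standalone check with a common 1080p60-style mode (the numbers are illustrative, not taken from the patch):

/* Standalone check, not driver code: at 148500 kHz the old code rounds
 * 6.73 ns/pixel down to 6, so line_time comes out 13200 ns instead of
 * the correct ~14814 ns. Scaling first and dividing last keeps the
 * precision.
 */
#include <stdio.h>

int main(void)
{
	unsigned int clock = 148500;	/* mode clock in kHz */
	unsigned int hdisplay = 1920, htotal = 2200;

	unsigned int pixel_period = 1000000 / clock;	/* truncates to 6 */
	unsigned int old_active = hdisplay * pixel_period;
	unsigned int old_line = htotal * pixel_period;

	unsigned int new_active = 1000000UL * hdisplay / clock;
	unsigned int new_line = 1000000UL * htotal / clock;

	printf("old: active_time=%u ns  line_time=%u ns\n", old_active, old_line);
	printf("new: active_time=%u ns  line_time=%u ns\n", new_active, new_line);
	return 0;
}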


@@ -2188,13 +2188,7 @@ static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+	lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -2261,7 +2255,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
 	struct evergreen_wm_params wm_low, wm_high;
 	u32 dram_channels;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -2272,8 +2266,8 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
@@ -2291,7 +2285,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2318,7 +2312,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
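
The evergreen hunks mirror the cik.c changes above; the lb_fill_bw rewrite additionally retires a chain of 20.12 fixed-point operations. dfixed_const() shifts its argument left by 12 bits inside a u32, so any operand near or above 2^20 overflows silently, which is why the replacement sticks to plain 64-bit integer math (div_u64() in the kernel). A standalone illustration of the overflow (the helpers are simplified stand-ins for the drm fixed-point macros, and the 2 GHz value is purely illustrative):

/* Simplified 20.12 fixed point, loosely modelled on fixed20_12. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fx_const(uint32_t v) { return v << 12; }
static uint32_t fx_trunc(uint32_t f) { return f >> 12; }

int main(void)
{
	uint32_t big = 2000000;	/* e.g. a 2 GHz clock expressed in kHz */

	/* The shift discards the top bits: 2000000 << 12 wraps in a u32. */
	printf("fx_trunc(fx_const(%u)) = %u\n", big, fx_trunc(fx_const(big)));

	/* 64-bit integer math keeps the value intact. */
	uint64_t ok = (uint64_t)big * 4096 / 4096;
	printf("64-bit round trip      = %llu\n", (unsigned long long)ok);
	return 0;
}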

Some files were not shown because too many files have changed in this diff.