Fix for the Lima GPU display

Revert some files back to a working state.

Signed-off-by: Alistair Francis <alistair@alistair23.me>

alistair/sunxi64-5.4-dsi
parent 6a58556571
commit 658171708c
@@ -93,20 +93,6 @@ config DRM_KMS_FB_HELPER
 	help
 	  FBDEV helpers for KMS drivers.
 
-config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
-	bool "Enable refcount backtrace history in the DP MST helpers"
-	select STACKDEPOT
-	depends on DRM_KMS_HELPER
-	depends on DEBUG_KERNEL
-	depends on EXPERT
-	help
-	  Enables debug tracing for topology refs in DRM's DP MST helpers. A
-	  history of each topology reference/dereference will be printed to the
-	  kernel log once a port or branch device's topology refcount reaches 0.
-
-	  This has the potential to use a lot of memory and print some very
-	  large kernel messages. If in doubt, say "N".
-
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
 	depends on DRM
@@ -53,9 +53,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
-	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
-	amdgpu_umc.o smu_v11_0_i2c.o
+	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -68,7 +67,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
-	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
+	arct_reg_init.o navi12_reg_init.o
 
 # add DF block
 amdgpu-y += \
@@ -84,7 +83,7 @@ amdgpu-y += \
 
 # add UMC block
 amdgpu-y += \
-	umc_v6_1.o umc_v6_0.o
+	umc_v6_1.o
 
 # add IH block
 amdgpu-y += \
@@ -73,7 +73,6 @@
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
-#include "amdgpu_nbio.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
@@ -107,8 +106,6 @@ struct amdgpu_mgpu_info
 	uint32_t num_apu;
 };
 
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
-
 /*
  * Modules parameters.
  */
@@ -125,7 +122,6 @@ extern int amdgpu_disp_priority;
 extern int amdgpu_hw_i2c;
 extern int amdgpu_pcie_gen2;
 extern int amdgpu_msi;
-extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
 extern int amdgpu_dpm;
 extern int amdgpu_fw_load_type;
 extern int amdgpu_aspm;
@@ -139,7 +135,6 @@ extern int amdgpu_vm_fragment_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
-extern int amdgpu_exp_hw_support;
 extern int amdgpu_dc;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
@@ -151,7 +146,11 @@ extern uint amdgpu_sdma_phase_quantum;
 extern char *amdgpu_disable_cu;
 extern char *amdgpu_virtual_display;
 extern uint amdgpu_pp_feature_mask;
-extern uint amdgpu_force_long_training;
+extern int amdgpu_ngg;
+extern int amdgpu_prim_buf_per_se;
+extern int amdgpu_pos_buf_per_se;
+extern int amdgpu_cntl_sb_buf_per_se;
+extern int amdgpu_param_buf_per_se;
 extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
@@ -168,12 +167,6 @@ extern int amdgpu_mcbp;
 extern int amdgpu_discovery;
 extern int amdgpu_mes;
 extern int amdgpu_noretry;
-extern int amdgpu_force_asic_type;
-#ifdef CONFIG_HSA_AMD
-extern int sched_policy;
-#else
-static const int sched_policy = KFD_SCHED_POLICY_HWS;
-#endif
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -290,9 +283,6 @@ struct amdgpu_ip_block_version {
 	const struct amd_ip_funcs *funcs;
 };
 
-#define HW_REV(_Major, _Minor, _Rev) \
-	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
-
 struct amdgpu_ip_block {
 	struct amdgpu_ip_block_status status;
 	const struct amdgpu_ip_block_version *version;
@@ -435,6 +425,7 @@ struct amdgpu_fpriv {
 };
 
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
@@ -486,6 +477,7 @@ struct amdgpu_cs_parser {
 	uint64_t bytes_moved_vis_threshold;
 	uint64_t bytes_moved;
 	uint64_t bytes_moved_vis;
+	struct amdgpu_bo_list_entry *evictable;
 
 	/* user fence */
 	struct amdgpu_bo_list_entry uf_entry;
@@ -632,11 +624,6 @@ struct amdgpu_fw_vram_usage {
 	u64 size;
 	struct amdgpu_bo *reserved_bo;
 	void *va;
-
-	/* Offset on the top of VRAM, used as c2p write buffer.
-	*/
-	u64 mem_train_fb_loc;
-	bool mem_train_support;
 };
 
 /*
@@ -657,14 +644,71 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+/*
+ * amdgpu nbio functions
+ *
+ */
+struct nbio_hdp_flush_reg {
+	u32 ref_and_mask_cp0;
+	u32 ref_and_mask_cp1;
+	u32 ref_and_mask_cp2;
+	u32 ref_and_mask_cp3;
+	u32 ref_and_mask_cp4;
+	u32 ref_and_mask_cp5;
+	u32 ref_and_mask_cp6;
+	u32 ref_and_mask_cp7;
+	u32 ref_and_mask_cp8;
+	u32 ref_and_mask_cp9;
+	u32 ref_and_mask_sdma0;
+	u32 ref_and_mask_sdma1;
+	u32 ref_and_mask_sdma2;
+	u32 ref_and_mask_sdma3;
+	u32 ref_and_mask_sdma4;
+	u32 ref_and_mask_sdma5;
+	u32 ref_and_mask_sdma6;
+	u32 ref_and_mask_sdma7;
+};
+
 struct amdgpu_mmio_remap {
 	u32 reg_offset;
 	resource_size_t bus_addr;
 };
 
+struct amdgpu_nbio_funcs {
+	const struct nbio_hdp_flush_reg *hdp_flush_reg;
+	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+	u32 (*get_rev_id)(struct amdgpu_device *adev);
+	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	u32 (*get_memsize)(struct amdgpu_device *adev);
+	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+			bool use_doorbell, int doorbell_index, int doorbell_size);
+	void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+			int doorbell_index, int instance);
+	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+			bool enable);
+	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+			bool enable);
+	void (*ih_doorbell_range)(struct amdgpu_device *adev,
+			bool use_doorbell, int doorbell_index);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+			bool enable);
+	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+			bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+			u32 *flags);
+	void (*ih_control)(struct amdgpu_device *adev);
+	void (*init_registers)(struct amdgpu_device *adev);
+	void (*detect_hw_virt)(struct amdgpu_device *adev);
+	void (*remap_hdp_registers)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_df_funcs {
 	void (*sw_init)(struct amdgpu_device *adev);
-	void (*sw_fini)(struct amdgpu_device *adev);
 	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
 				      bool enable);
 	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -769,7 +813,6 @@ struct amdgpu_device {
 	uint8_t *bios;
 	uint32_t bios_size;
 	struct amdgpu_bo *stolen_vga_memory;
-	struct amdgpu_bo *discovery_memory;
 	uint32_t bios_scratch_reg_offset;
 	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -878,12 +921,6 @@ struct amdgpu_device {
 	u32 cg_flags;
 	u32 pg_flags;
 
-	/* nbio */
-	struct amdgpu_nbio nbio;
-
-	/* mmhub */
-	struct amdgpu_mmhub mmhub;
-
 	/* gfx */
 	struct amdgpu_gfx gfx;
 
@@ -937,7 +974,9 @@
 	/* soc15 register offset based on ip, instance and segment */
 	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
+	const struct amdgpu_nbio_funcs *nbio_funcs;
 	const struct amdgpu_df_funcs *df_funcs;
+	const struct amdgpu_mmhub_funcs *mmhub_funcs;
 
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work delayed_init_work;
@@ -970,6 +1009,8 @@
 	int asic_reset_res;
 	struct work_struct xgmi_reset_work;
 
+	bool in_baco_reset;
+
 	long gfx_timeout;
 	long sdma_timeout;
 	long video_timeout;
@@ -991,8 +1032,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 void amdgpu_device_fini(struct amdgpu_device *adev);
 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
-void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
-			       uint32_t *buf, size_t size, bool write);
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
@@ -63,10 +63,45 @@ void amdgpu_amdkfd_fini(void)
 
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 {
-	bool vf = amdgpu_sriov_vf(adev);
+	const struct kfd2kgd_calls *kfd2kgd;
+
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
+		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+		break;
+#endif
+	case CHIP_CARRIZO:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
+		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+		break;
+	case CHIP_VEGA10:
+	case CHIP_VEGA12:
+	case CHIP_VEGA20:
+	case CHIP_RAVEN:
+		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
+		break;
+	case CHIP_ARCTURUS:
+		kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
+		break;
+	case CHIP_NAVI10:
+	case CHIP_NAVI14:
+	case CHIP_NAVI12:
+		kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
+		break;
+	default:
+		dev_info(adev->dev, "kfd not supported on this ASIC\n");
+		return;
+	}
 
 	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
-				      adev->pdev, adev->asic_type, vf);
+				      adev->pdev, kfd2kgd);
 
 	if (adev->kfd.dev)
 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -130,6 +165,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			adev->gfx.mec.queue_bitmap,
 			KGD_MAX_QUEUES);
 
+		/* remove the KIQ bit as well */
+		if (adev->gfx.kiq.ring.sched.ready)
+			clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
+					adev->gfx.kiq.ring.me - 1,
+					adev->gfx.kiq.ring.pipe,
+					adev->gfx.kiq.ring.queue),
+				  gpu_resources.queue_bitmap);
+
 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant
 		 */
@@ -159,7 +202,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				adev->doorbell_index.last_non_cp;
 		}
 
-		kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+		kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
 	}
 }
 
@@ -666,14 +709,38 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
 	return 0;
 }
 
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
+{
+	return NULL;
+}
+
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
+			      const struct kfd2kgd_calls *f2g)
 {
 	return NULL;
 }
 
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources)
 {
 	return false;
@@ -57,7 +57,7 @@ struct kgd_mem {
 	unsigned int mapped_to_gpu_memory;
 	uint64_t va;
 
-	uint32_t alloc_flags;
+	uint32_t mapping_flags;
 
 	atomic_t invalid;
 	struct amdkfd_process_info *process_info;
@@ -137,6 +137,12 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
 
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
+
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
 int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
@@ -173,17 +179,10 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
 
-/* Read user wptr from a specified user address space with page fault
- * disabled. The memory must be pinned and mapped to the hardware when
- * this is called in hqd_load functions, so it should never fault in
- * the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
- */
 #define read_user_wptr(mmptr, wptr, dst)			\
 	({							\
 		bool valid = false;				\
 		if ((mmptr) && (wptr)) {			\
-			pagefault_disable();			\
 			if ((mmptr) == current->mm) {		\
 				valid = !get_user((dst), (wptr)); \
 			} else if (current->mm == NULL) {	\
@@ -191,7 +190,6 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 				valid = !get_user((dst), (wptr)); \
 				unuse_mm(mmptr);		\
 			}					\
-			pagefault_enable();			\
 		}						\
 		valid;						\
 	})
@@ -242,9 +240,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf);
+			      const struct kfd2kgd_calls *f2g);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd);
@@ -19,6 +19,10 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+
+#undef pr_fmt
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
 #include <linux/module.h>
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
@@ -65,11 +69,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
 	return (struct v9_sdma_mqd *)mqd;
 }
 
-static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
 				unsigned int engine_id,
 				unsigned int queue_id)
 {
-	uint32_t sdma_engine_reg_base[8] = {
+	uint32_t base[8] = {
 		SOC15_REG_OFFSET(SDMA0, 0,
 				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
 		SOC15_REG_OFFSET(SDMA1, 0,
@@ -87,82 +91,111 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 		SOC15_REG_OFFSET(SDMA7, 0,
 				 mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
 	};
+	uint32_t retval;
 
-	uint32_t retval = sdma_engine_reg_base[engine_id]
-		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
+			mmSDMA0_RLC0_RB_CNTL);
 
-	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-			queue_id, retval);
+	pr_debug("sdma base address: 0x%x\n", retval);
 
 	return retval;
 }
 
+static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
+		u32 instance, u32 offset)
+{
+	switch (instance) {
+	case 0:
+		return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
+	case 1:
+		return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
+	case 2:
+		return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
+	case 3:
+		return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
+	case 4:
+		return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
+	case 5:
+		return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
+	case 6:
+		return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
+	case 7:
+		return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
+	default:
+		break;
+	}
+	return 0;
+}
+
 static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
 	unsigned long end_jiffies;
 	uint32_t data;
 	uint64_t data64;
 	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
+	sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
+			m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
 	while (true) {
-		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
+	data = RREG32(sdmax_gfx_context_cntl);
+	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+			     RESUME_CTX, 0);
+	WREG32(sdmax_gfx_context_cntl, data);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
 	       m->sdmax_rlcx_doorbell_offset);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
 			     ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
-	       m->sdmax_rlcx_rb_rptr);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
 	       m->sdmax_rlcx_rb_rptr_hi);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
 	if (read_user_wptr(mm, wptr64, data64)) {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 		       lower_32_bits(data64));
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 		       upper_32_bits(data64));
 	} else {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 		       m->sdmax_rlcx_rb_rptr);
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 		       m->sdmax_rlcx_rb_rptr_hi);
 	}
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 	       m->sdmax_rlcx_rb_base_hi);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 	       m->sdmax_rlcx_rb_rptr_addr_lo);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 	       m->sdmax_rlcx_rb_rptr_addr_hi);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
 			     RB_ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
 
 	return 0;
 }
@@ -172,8 +205,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
 		uint32_t (**dump)[2], uint32_t *n_regs)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
-			engine_id, queue_id);
+	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
 	uint32_t i = 0, reg;
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)
@@ -183,15 +215,15 @@
 		return -ENOMEM;
 
 	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
 	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
 	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -203,14 +235,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t sdma_rlc_rb_cntl;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 
 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
 		return true;
@@ -223,42 +255,40 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t temp;
 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
 
 	while (true) {
-		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
-	       RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+	       RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
 	       SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
-	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
 	m->sdmax_rlcx_rb_rptr_hi =
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
 
 	return 0;
 }
 
-const struct kfd2kgd_calls arcturus_kfd2kgd = {
+static const struct kfd2kgd_calls kfd2kgd = {
 	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -274,11 +304,20 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
 	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
-	.get_atc_vmid_pasid_mapping_info =
-			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+	.get_atc_vmid_pasid_mapping_pasid =
+			kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
+	.set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
 	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
 	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
 	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
+
+struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
@@ -19,9 +19,18 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+#undef pr_fmt
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
 #include <linux/mmu_context.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "soc15_hw_ip.h"
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
 #include "navi10_enum.h"
@@ -33,7 +42,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_0.h"
 
 enum hqd_dequeue_request_type {
 	NO_ACTION = 0,
@@ -42,6 +50,63 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr,
+			uint32_t wptr_shift, uint32_t wptr_mask,
+			struct mm_struct *mm);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+				enum kfd_preempt_type reset_type,
+				unsigned int utimeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int utimeout);
+#if 0
+static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
+#endif
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+		uint64_t page_table_base);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+
 /* Because of REG_GET_FIELD() being used, we put this function in the
  * asic specific file.
  */
@@ -74,6 +139,37 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 	return 0;
 }
 
+static const struct kfd2kgd_calls kfd2kgd = {
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_dump = kgd_hqd_dump,
+	.hqd_sdma_dump = kgd_hqd_sdma_dump,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.invalidate_tlbs = invalidate_tlbs,
+	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.get_tile_config = amdgpu_amdkfd_get_tile_config,
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -154,6 +250,11 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 			ATC_VMID0_PASID_MAPPING__VALID_MASK;
 
 	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
+	/*
+	 * need to do this twice, once for gfx and once for mmhub
+	 * for ATC add 16 to VMID for mmhub, for IH different registers.
+	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
+	 */
 
 	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
 	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -205,11 +306,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	return 0;
 }
 
-static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
 				unsigned int engine_id,
 				unsigned int queue_id)
 {
-	uint32_t sdma_engine_reg_base[2] = {
+	uint32_t base[2] = {
 		SOC15_REG_OFFSET(SDMA0, 0,
 				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
 		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -221,12 +322,12 @@
 		SOC15_REG_OFFSET(SDMA1, 0,
 				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
 	};
+	uint32_t retval;
 
-	uint32_t retval = sdma_engine_reg_base[engine_id]
-		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
+			mmSDMA0_RLC0_RB_CNTL);
 
-	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-			queue_id, retval);
+	pr_debug("sdma base address: 0x%x\n", retval);
 
 	return retval;
 }
@@ -387,67 +488,72 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
 	unsigned long end_jiffies;
 	uint32_t data;
 	uint64_t data64;
 	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
+	pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
+	sdmax_gfx_context_cntl = m->sdma_engine_id ?
+		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
+		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
 	while (true) {
-		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
+	data = RREG32(sdmax_gfx_context_cntl);
+	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+			     RESUME_CTX, 0);
+	WREG32(sdmax_gfx_context_cntl, data);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
 	       m->sdmax_rlcx_doorbell_offset);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
 			     ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
-	       m->sdmax_rlcx_rb_rptr);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
 	       m->sdmax_rlcx_rb_rptr_hi);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
 	if (read_user_wptr(mm, wptr64, data64)) {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 		       lower_32_bits(data64));
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 		       upper_32_bits(data64));
 	} else {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 		       m->sdmax_rlcx_rb_rptr);
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 		       m->sdmax_rlcx_rb_rptr_hi);
 	}
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 	       m->sdmax_rlcx_rb_base_hi);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 	       m->sdmax_rlcx_rb_rptr_addr_lo);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 	       m->sdmax_rlcx_rb_rptr_addr_hi);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
 			     RB_ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
 
 	return 0;
 }
@@ -457,26 +563,28 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
 		uint32_t (**dump)[2], uint32_t *n_regs)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
-			engine_id, queue_id);
+	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
 	uint32_t i = 0, reg;
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)
 
+	pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
+	pr_debug("sdma base addr %x\n", sdma_base_addr);
+
 	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
 
 	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
 	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
 	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -510,14 +618,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t sdma_rlc_rb_cntl;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 
 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
 		return true;
@@ -638,52 +746,59 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t temp;
 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
 
 	while (true) {
-		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
 		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
-	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
 	m->sdmax_rlcx_rb_rptr_hi =
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
 
 	return 0;
 }
 
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
-					uint8_t vmid, uint16_t *p_pasid)
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
 {
-	uint32_t value;
+	uint32_t reg;
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
-	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
 		     + vmid);
-	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
 
-	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
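
The revert splits the combined mapping query back into a validity check and a PASID read, both decoding the same ATC_VMIDn_PASID_MAPPING register. A sketch of the shared decode, using the mask names from the hunk above; 'value' would come from the RREG32() shown there:

/* Sketch: decode one ATC_VMIDn_PASID_MAPPING value. Returns whether
 * the mapping is valid and writes the PASID field to *p_pasid. */
static bool decode_vmid_pasid_mapping(uint32_t value, uint16_t *p_pasid)
{
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
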
@@ -715,8 +830,6 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 	int vmid;
-	uint16_t queried_pasid;
-	bool ret;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 
 	if (amdgpu_emu_mode == 0 && ring->sched.ready)
@@ -725,15 +838,15 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 	for (vmid = 0; vmid < 16; vmid++) {
 		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
 			continue;
-
-		ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
-				&queried_pasid);
-		if (ret && queried_pasid == pasid) {
-			amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-					AMDGPU_GFXHUB_0, 0);
-			break;
+		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
+			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
+					== pasid) {
+				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+						AMDGPU_GFXHUB_0, 0);
+				break;
+			}
 		}
 	}
 
 	return 0;
 }
@@ -801,6 +914,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint64_t base = page_table_base | AMDGPU_PTE_VALID;
 
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
@@ -808,31 +922,18 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		return;
 	}
 
-	/* SDMA is on gfxhub as well for Navi1* series */
-	gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
-}
+	/* TODO: take advantage of per-process address space size. For
+	 * now, all processes share the same address space size, like
+	 * on GFX8 and older.
+	 */
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
 
-const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
-	.program_sh_mem_settings = kgd_program_sh_mem_settings,
-	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
-	.init_interrupts = kgd_init_interrupts,
-	.hqd_load = kgd_hqd_load,
-	.hqd_sdma_load = kgd_hqd_sdma_load,
-	.hqd_dump = kgd_hqd_dump,
-	.hqd_sdma_dump = kgd_hqd_sdma_dump,
-	.hqd_is_occupied = kgd_hqd_is_occupied,
-	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
-	.hqd_destroy = kgd_hqd_destroy,
-	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-	.address_watch_disable = kgd_address_watch_disable,
-	.address_watch_execute = kgd_address_watch_execute,
-	.wave_control_execute = kgd_wave_control_execute,
-	.address_watch_get_offset = kgd_address_watch_get_offset,
-	.get_atc_vmid_pasid_mapping_info =
-			get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = amdgpu_amdkfd_get_tile_config,
-	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.get_hive_id = amdgpu_amdkfd_get_hive_id,
-};
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
+			lower_32_bits(adev->vm_manager.max_pfn - 1));
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
+			upper_32_bits(adev->vm_manager.max_pfn - 1));
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+}
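
The reinstated Navi10 path above programs each 64-bit GCVM address as a LO/HI register pair, using the usual lower_32_bits()/upper_32_bits() split. A sketch of the pattern under that assumption; 'lo_reg' and 'hi_reg' stand in for the SOC15 register offsets used in the hunk:

/* Sketch: write a 64-bit GPU VM address as a LO/HI pair of 32-bit
 * MMIO registers, as done for GCVM_CONTEXT0_PAGE_TABLE_BASE above. */
static void write_vm_base(struct amdgpu_device *adev, uint32_t lo_reg,
			  uint32_t hi_reg, uint64_t base)
{
	WREG32(lo_reg, lower_32_bits(base));
	WREG32(hi_reg, upper_32_bits(base));
}
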

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,6 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
 #include <linux/mmu_context.h>
 
 #include "amdgpu.h"
@@ -84,6 +86,65 @@ union TCP_WATCH_CNTL_BITS {
 	float f32All;
 };
 
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid);
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr,
+			uint32_t wptr_shift, uint32_t wptr_mask,
+			struct mm_struct *mm);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+				enum kfd_preempt_type reset_type,
+				unsigned int utimeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int utimeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid);
+
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+					uint64_t va, uint32_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+			uint64_t page_table_base);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
+
 /* Because of REG_GET_FIELD() being used, we put this function in the
  * asic specific file.
  */
@@ -109,6 +170,37 @@ static int get_tile_config(struct kgd_dev *kgd,
 	return 0;
 }
 
+static const struct kfd2kgd_calls kfd2kgd = {
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_dump = kgd_hqd_dump,
+	.hqd_sdma_dump = kgd_hqd_sdma_dump,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+	.set_scratch_backing_va = set_scratch_backing_va,
+	.get_tile_config = get_tile_config,
+	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.invalidate_tlbs = invalidate_tlbs,
+	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -211,15 +303,14 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	return 0;
 }
 
-static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
 {
 	uint32_t retval;
 
 	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
 			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
 
-	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
-			m->sdma_engine_id, m->sdma_queue_id, retval);
+	pr_debug("sdma base address: 0x%x\n", retval);
 
 	return retval;
 }
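
get_sdma_base_addr() on CIK is pure stride arithmetic: engine 1's register block sits SDMA1_REGISTER_OFFSET above engine 0, and each RLC queue is KFD_CIK_SDMA_QUEUE_OFFSET further along. A standalone sketch of the same computation:

/* Sketch: per-queue SDMA register window on CIK-era parts, built
 * from a fixed per-engine stride plus a fixed per-queue stride. */
static uint32_t sdma_queue_reg_base(uint32_t engine_id, uint32_t queue_id)
{
	return engine_id * SDMA1_REGISTER_OFFSET +
	       queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
}
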
@@ -322,52 +413,60 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
 	unsigned long end_jiffies;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t data;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+	sdma_base_addr = get_sdma_base_addr(m);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
 	while (true) {
-		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
+	if (m->sdma_engine_id) {
+		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+	} else {
+		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+	}
 
 	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
 			ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
-		m->sdma_rlc_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
 
 	if (read_user_wptr(mm, wptr, data))
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
 	else
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 			m->sdma_rlc_rb_rptr);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
 		m->sdma_rlc_virtual_addr);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 		m->sdma_rlc_rb_base_hi);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 		m->sdma_rlc_rb_rptr_addr_lo);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 		m->sdma_rlc_rb_rptr_addr_hi);
 
 	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
 			RB_ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
 
 	return 0;
 }
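
Several hunks in this patch restore the same bounded poll: read CONTEXT_STATUS until the IDLE bit is set or a jiffies deadline passes (the reverted code simply drops the pr_err() on timeout). A self-contained sketch of the pattern:

/* Sketch: poll an SDMA RLC queue until idle, with a timeout.
 * Returns 0 when the IDLE bit sets, -ETIME if the deadline passes. */
static int wait_sdma_rlc_idle(struct amdgpu_device *adev,
			      uint32_t sdma_base_addr,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
	uint32_t data;

	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			return 0;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);	/* back off between reads */
	}
}
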
@@ -425,13 +524,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t sdma_rlc_rb_cntl;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+	sdma_base_addr = get_sdma_base_addr(m);
 
-	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 
 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
 		return true;
@@ -546,34 +645,32 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t temp;
 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+	sdma_base_addr = get_sdma_base_addr(m);
 
-	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
 
 	while (true) {
-		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
 		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
-	m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+	m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
 
 	return 0;
 }
@@ -661,16 +758,24 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
 	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
 }
 
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
-					uint8_t vmid, uint16_t *p_pasid)
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
 {
-	uint32_t value;
+	uint32_t reg;
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
-	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
 
-	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 }
 
 static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -750,28 +855,3 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
 
 	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 }
-
-const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
-	.program_sh_mem_settings = kgd_program_sh_mem_settings,
-	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
-	.init_interrupts = kgd_init_interrupts,
-	.hqd_load = kgd_hqd_load,
-	.hqd_sdma_load = kgd_hqd_sdma_load,
-	.hqd_dump = kgd_hqd_dump,
-	.hqd_sdma_dump = kgd_hqd_sdma_dump,
-	.hqd_is_occupied = kgd_hqd_is_occupied,
-	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
-	.hqd_destroy = kgd_hqd_destroy,
-	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-	.address_watch_disable = kgd_address_watch_disable,
-	.address_watch_execute = kgd_address_watch_execute,
-	.wave_control_execute = kgd_wave_control_execute,
-	.address_watch_get_offset = kgd_address_watch_get_offset,
-	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
-	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
-	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,6 +20,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
 #include <linux/mmu_context.h>
 
 #include "amdgpu.h"
@@ -41,6 +44,62 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr,
+			uint32_t wptr_shift, uint32_t wptr_mask,
+			struct mm_struct *mm);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+				enum kfd_preempt_type reset_type,
+				unsigned int utimeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int utimeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+					uint64_t va, uint32_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+		uint64_t page_table_base);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+
 /* Because of REG_GET_FIELD() being used, we put this function in the
  * asic specific file.
  */
@@ -66,6 +125,38 @@ static int get_tile_config(struct kgd_dev *kgd,
 	return 0;
 }
 
+static const struct kfd2kgd_calls kfd2kgd = {
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_dump = kgd_hqd_dump,
+	.hqd_sdma_dump = kgd_hqd_sdma_dump,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.set_scratch_backing_va = set_scratch_backing_va,
+	.get_tile_config = get_tile_config,
+	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.invalidate_tlbs = invalidate_tlbs,
+	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -169,15 +260,13 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	return 0;
 }
 
-static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
 {
 	uint32_t retval;
 
 	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
 		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
-
-	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
-			m->sdma_engine_id, m->sdma_queue_id, retval);
+	pr_debug("sdma base address: 0x%x\n", retval);
 
 	return retval;
 }
@@ -309,51 +398,59 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct vi_sdma_mqd *m;
 	unsigned long end_jiffies;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t data;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+	sdma_base_addr = get_sdma_base_addr(m);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
 	while (true) {
-		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
+	if (m->sdma_engine_id) {
+		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+	} else {
+		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+	}
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
 			ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
-		m->sdmax_rlcx_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
 
 	if (read_user_wptr(mm, wptr, data))
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
 	else
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 			m->sdmax_rlcx_rb_rptr);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
 		m->sdmax_rlcx_virtual_addr);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 		m->sdmax_rlcx_rb_base_hi);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 		m->sdmax_rlcx_rb_rptr_addr_lo);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 		m->sdmax_rlcx_rb_rptr_addr_hi);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
 			RB_ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
 
 	return 0;
 }
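
When re-enabling the ring, the code above trusts the user-mode write pointer only if it can actually be copied in; otherwise it restarts the queue at the saved read pointer, i.e. empty. A sketch of that choice, assuming read_user_wptr() is the copy-from-user helper these files define:

/* Sketch: pick the restart write pointer. If the user-space wptr
 * cannot be read (no mm, or a fault), reuse the saved rptr so the
 * ring comes back with no pending commands. */
static uint32_t pick_restart_wptr(struct mm_struct *mm,
				  uint32_t __user *wptr, uint32_t saved_rptr)
{
	uint32_t data;

	if (read_user_wptr(mm, wptr, data))
		return data;
	return saved_rptr;
}
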
@@ -420,13 +517,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct vi_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t sdma_rlc_rb_cntl;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+	sdma_base_addr = get_sdma_base_addr(m);
 
-	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 
 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
 		return true;
@@ -544,48 +641,54 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct vi_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t temp;
 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+	sdma_base_addr = get_sdma_base_addr(m);
 
-	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
 
 	while (true) {
-		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
 		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
-	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
 
 	return 0;
 }
 
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
-					uint8_t vmid, uint16_t *p_pasid)
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
 {
-	uint32_t value;
+	uint32_t reg;
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
-	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
 
-	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 }
 
 static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -695,28 +798,3 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
 	RREG32(mmVM_INVALIDATE_RESPONSE);
 	return 0;
 }
-
-const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
-	.program_sh_mem_settings = kgd_program_sh_mem_settings,
-	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
-	.init_interrupts = kgd_init_interrupts,
-	.hqd_load = kgd_hqd_load,
-	.hqd_sdma_load = kgd_hqd_sdma_load,
-	.hqd_dump = kgd_hqd_dump,
-	.hqd_sdma_dump = kgd_hqd_sdma_dump,
-	.hqd_is_occupied = kgd_hqd_is_occupied,
-	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
-	.hqd_destroy = kgd_hqd_destroy,
-	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-	.address_watch_disable = kgd_address_watch_disable,
-	.address_watch_execute = kgd_address_watch_execute,
-	.wave_control_execute = kgd_wave_control_execute,
-	.address_watch_get_offset = kgd_address_watch_get_offset,
-	.get_atc_vmid_pasid_mapping_info =
-			get_atc_vmid_pasid_mapping_info,
-	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
-	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,10 +19,17 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
 #include <linux/mmu_context.h>
 
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
+#include "soc15_hw_ip.h"
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
 #include "vega10_enum.h"
@@ -43,6 +50,9 @@
 #include "gmc_v9_0.h"
 
 
+#define V9_PIPE_PER_MEC		(4)
+#define V9_QUEUES_PER_PIPE_MEC	(8)
+
 enum hqd_dequeue_request_type {
 	NO_ACTION = 0,
 	DRAIN_PIPE,
@@ -216,21 +226,22 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	return 0;
 }
 
-static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
 				unsigned int engine_id,
 				unsigned int queue_id)
 {
-	uint32_t sdma_engine_reg_base[2] = {
+	uint32_t base[2] = {
 		SOC15_REG_OFFSET(SDMA0, 0,
 				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
 		SOC15_REG_OFFSET(SDMA1, 0,
 				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
 	};
-	uint32_t retval = sdma_engine_reg_base[engine_id]
-		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+	uint32_t retval;
 
-	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-			queue_id, retval);
+	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
+			mmSDMA0_RLC0_RB_CNTL);
+
+	pr_debug("sdma base address: 0x%x\n", retval);
 
 	return retval;
 }
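
On GFX9 the per-engine base is no longer a fixed stride: it comes from SOC15_REG_OFFSET() with the register's own index subtracted back out, while queue spacing is still the RLC1-to-RLC0 distance. A sketch mirroring the hunk above:

/* Sketch: per-queue SDMA register base on GFX9. The engine base is
 * the SOC15 offset of its RLC0_RB_CNTL minus that register's index;
 * queues are spaced (RLC1 - RLC0) register indices apart. */
static uint32_t sdma_queue_reg_base_v9(unsigned int engine_id,
				       unsigned int queue_id)
{
	uint32_t base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) -
			mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) -
			mmSDMA1_RLC0_RB_CNTL
	};

	return base[engine_id] +
	       queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
}
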
@@ -377,67 +388,71 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
 	unsigned long end_jiffies;
 	uint32_t data;
 	uint64_t data64;
 	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
+	sdmax_gfx_context_cntl = m->sdma_engine_id ?
+		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
+		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
 	while (true) {
-		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
+	data = RREG32(sdmax_gfx_context_cntl);
+	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+			RESUME_CTX, 0);
+	WREG32(sdmax_gfx_context_cntl, data);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
 		m->sdmax_rlcx_doorbell_offset);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
 			ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
-		m->sdmax_rlcx_rb_rptr);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
 		m->sdmax_rlcx_rb_rptr_hi);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
 	if (read_user_wptr(mm, wptr64, data64)) {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 			lower_32_bits(data64));
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 			upper_32_bits(data64));
 	} else {
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
 			m->sdmax_rlcx_rb_rptr);
-		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
 			m->sdmax_rlcx_rb_rptr_hi);
 	}
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 		m->sdmax_rlcx_rb_base_hi);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 		m->sdmax_rlcx_rb_rptr_addr_lo);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 		m->sdmax_rlcx_rb_rptr_addr_hi);
 
 	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
 			RB_ENABLE, 1);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
 
 	return 0;
 }
@@ -447,8 +462,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
 			     uint32_t (**dump)[2], uint32_t *n_regs)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
-			engine_id, queue_id);
+	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
 	uint32_t i = 0, reg;
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)
@@ -458,15 +472,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
 		return -ENOMEM;
 
 	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
 	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
 	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
-		DUMP_REG(sdma_rlc_reg_offset + reg);
+		DUMP_REG(sdma_base_addr + reg);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -500,14 +514,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t sdma_rlc_rb_cntl;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 
 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
 		return true;
@@ -570,52 +584,59 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
-	uint32_t sdma_rlc_reg_offset;
+	uint32_t sdma_base_addr;
 	uint32_t temp;
 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
 
 	m = get_sdma_mqd(mqd);
-	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
 					    m->sdma_queue_id);
 
-	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
 
 	while (true) {
-		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
-		if (time_after(jiffies, end_jiffies)) {
-			pr_err("SDMA RLC not idle in %s\n", __func__);
+		if (time_after(jiffies, end_jiffies))
 			return -ETIME;
-		}
 		usleep_range(500, 1000);
 	}
 
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
 		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
-	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
 	m->sdmax_rlcx_rb_rptr_hi =
-		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
 
 	return 0;
 }
 
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
-					uint8_t vmid, uint16_t *p_pasid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
 {
-	uint32_t value;
+	uint32_t reg;
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
-	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
 		     + vmid);
-	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
 
-	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
@ -650,8 +671,6 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
|
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
|
||||||
int vmid, i;
|
int vmid, i;
|
||||||
uint16_t queried_pasid;
|
|
||||||
bool ret;
|
|
||||||
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
|
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
|
||||||
uint32_t flush_type = 0;
|
uint32_t flush_type = 0;
|
||||||
|
|
||||||
|
@ -667,16 +686,16 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
|
||||||
for (vmid = 0; vmid < 16; vmid++) {
|
for (vmid = 0; vmid < 16; vmid++) {
|
||||||
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
|
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
|
||||||
continue;
|
continue;
|
||||||
|
if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
|
||||||
ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
|
if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
|
||||||
&queried_pasid);
|
== pasid) {
|
||||||
if (ret && queried_pasid == pasid) {
|
|
||||||
for (i = 0; i < adev->num_vmhubs; i++)
|
for (i = 0; i < adev->num_vmhubs; i++)
|
||||||
amdgpu_gmc_flush_gpu_tlb(adev, vmid,
|
amdgpu_gmc_flush_gpu_tlb(adev, vmid,
|
||||||
i, flush_type);
|
i, flush_type);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -758,6 +777,15 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
|
||||||
|
uint64_t va, uint32_t vmid)
|
||||||
|
{
|
||||||
|
/* No longer needed on GFXv9. The scratch base address is
|
||||||
|
* passed to the shader by the CP. It's the user mode driver's
|
||||||
|
* responsibility.
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||||
uint64_t page_table_base)
|
uint64_t page_table_base)
|
||||||
{
|
{
|
||||||
|
@ -783,7 +811,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
|
||||||
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
|
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
|
static const struct kfd2kgd_calls kfd2kgd = {
|
||||||
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
|
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
|
||||||
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
|
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
|
||||||
.init_interrupts = kgd_gfx_v9_init_interrupts,
|
.init_interrupts = kgd_gfx_v9_init_interrupts,
|
||||||
|
@ -799,11 +827,19 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
|
||||||
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
|
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
|
||||||
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
|
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
|
||||||
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
|
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
|
||||||
.get_atc_vmid_pasid_mapping_info =
|
.get_atc_vmid_pasid_mapping_pasid =
|
||||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
|
||||||
|
.get_atc_vmid_pasid_mapping_valid =
|
||||||
|
kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
|
||||||
|
.set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
|
||||||
.get_tile_config = kgd_gfx_v9_get_tile_config,
|
.get_tile_config = kgd_gfx_v9_get_tile_config,
|
||||||
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
|
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
|
||||||
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
|
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
|
||||||
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
|
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
|
||||||
.get_hive_id = amdgpu_amdkfd_get_hive_id,
|
.get_hive_id = amdgpu_amdkfd_get_hive_id,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
|
||||||
|
{
|
||||||
|
return (struct kfd2kgd_calls *)&kfd2kgd;
|
||||||
|
}
|
||||||
|
|
|
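The SDMA teardown above waits for hardware by polling a status register for an idle bit, bounded by a jiffies deadline and backed off with usleep_range(). Below is a minimal standalone sketch of that bounded-poll shape; the function names and the fake hardware state are illustrative, not the driver's.

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* hw_idle() stands in for the RREG32(...CONTEXT_STATUS) & IDLE_MASK check;
 * here the "hardware" becomes idle after a few polls. */
static bool hw_idle(int *fake_state)
{
	return --(*fake_state) <= 0;
}

static int wait_for_idle(long timeout_ms)
{
	struct timespec start, now;
	int fake_state = 5;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (hw_idle(&fake_state))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* the kernel code returns -ETIME here */
		usleep(500);		/* mirrors usleep_range(500, 1000) */
	}
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(1000));
	return 0;
}
```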
@@ -55,10 +55,14 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 					unsigned int watch_point_id,
 					unsigned int reg_offset);
 
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
-					uint8_t vmid, uint16_t *p_pasid);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+					uint8_t vmid);
+uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+					uint8_t vmid);
 void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		uint64_t page_table_base);
+void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
+					uint64_t va, uint32_t vmid);
 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
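Both sides of this header change read the same per-VMID ATC mapping register: the pre-revert `_mapping_info` helper returned the valid bit and wrote the PASID through a pointer in one call, while the reverted `_valid`/`_pasid` pair reads the register once per question and masks out one field each. A userspace sketch of the mask arithmetic follows; the bit positions here are made up for illustration, the real masks come from the ASIC register headers.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bit 31 = valid, bits 15:0 = PASID. */
#define PASID_MAPPING_VALID_MASK 0x80000000u
#define PASID_MAPPING_PASID_MASK 0x0000ffffu

static bool mapping_valid(uint32_t reg)
{
	return reg & PASID_MAPPING_VALID_MASK;
}

static uint16_t mapping_pasid(uint32_t reg)
{
	return reg & PASID_MAPPING_PASID_MASK;
}

int main(void)
{
	uint32_t reg = 0x80000042u;	/* stand-in for an RREG32() result */

	if (mapping_valid(reg))
		printf("vmid maps to pasid 0x%x\n", mapping_pasid(reg));
	return 0;
}
```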
@@ -19,6 +19,9 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
 #include <linux/dma-buf.h>
 #include <linux/list.h>
 #include <linux/pagemap.h>
@@ -30,6 +33,11 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
 
+/* Special VM and GART address alignment needed for VI pre-Fiji due to
+ * a HW bug.
+ */
+#define VI_BO_SIZE_ALIGN (0x8000)
+
 /* BO flag to indicate a KFD userptr BO */
 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
 
@@ -341,46 +349,13 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 	int ret;
 
-	ret = amdgpu_vm_update_pdes(adev, vm, false);
+	ret = amdgpu_vm_update_directories(adev, vm);
 	if (ret)
 		return ret;
 
 	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
 }
 
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
-{
-	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
-	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
-	uint32_t mapping_flags;
-
-	mapping_flags = AMDGPU_VM_PAGE_READABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
-		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
-		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
-
-	switch (adev->asic_type) {
-	case CHIP_ARCTURUS:
-		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
-			if (bo_adev == adev)
-				mapping_flags |= coherent ?
-					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
-			else
-				mapping_flags |= AMDGPU_VM_MTYPE_UC;
-		} else {
-			mapping_flags |= coherent ?
-				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
-		}
-		break;
-	default:
-		mapping_flags |= coherent ?
-			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
-	}
-
-	return amdgpu_gem_va_map_flags(adev, mapping_flags);
-}
-
 /* add_bo_to_vm - Add a BO to a VM
  *
  * Everything that needs to bo done only once when a BO is first added
@@ -429,7 +404,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 	}
 
 	bo_va_entry->va = va;
-	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
+	bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
+							 mem->mapping_flags);
 	bo_va_entry->kgd_dev = (void *)adev;
 	list_add(&bo_va_entry->bo_list, list_bo_va);
 
@@ -1103,8 +1079,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_param bp;
+	int byte_align;
 	u32 domain, alloc_domain;
 	u64 alloc_flags;
+	uint32_t mapping_flags;
 	int ret;
 
 	/*
@@ -1157,7 +1135,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	if ((*mem)->aql_queue)
 		size = size >> 1;
 
-	(*mem)->alloc_flags = flags;
+	/* Workaround for TLB bug on older VI chips */
+	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
+			adev->asic_type != CHIP_FIJI &&
+			adev->asic_type != CHIP_POLARIS10 &&
+			adev->asic_type != CHIP_POLARIS11 &&
+			adev->asic_type != CHIP_POLARIS12 &&
+			adev->asic_type != CHIP_VEGAM) ?
+			VI_BO_SIZE_ALIGN : 1;
+
+	mapping_flags = AMDGPU_VM_PAGE_READABLE;
+	if (flags & ALLOC_MEM_FLAGS_WRITABLE)
+		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+	if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+	if (flags & ALLOC_MEM_FLAGS_COHERENT)
+		mapping_flags |= AMDGPU_VM_MTYPE_UC;
+	else
+		mapping_flags |= AMDGPU_VM_MTYPE_NC;
+	(*mem)->mapping_flags = mapping_flags;
 
 	amdgpu_sync_create(&(*mem)->sync);
 
@@ -1172,7 +1168,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 	memset(&bp, 0, sizeof(bp));
 	bp.size = size;
-	bp.byte_align = 1;
+	bp.byte_align = byte_align;
 	bp.domain = alloc_domain;
 	bp.flags = alloc_flags;
 	bp.type = bo_type;
@@ -1630,10 +1626,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
-	(*mem)->alloc_flags =
-		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-		ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
-		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+	(*mem)->mapping_flags =
+		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
 
 	(*mem)->bo = amdgpu_bo_ref(bo);
 	(*mem)->va = va;
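The reverted allocator derives page-table mapping flags from the KFD allocation flags: every BO is at least readable, write and execute bits are opt-in, and the cache behavior is picked by the COHERENT flag (uncached when coherence is required, noncoherent otherwise). A compilable sketch of that flag-building logic, with illustrative constant values; the real AMDGPU_VM_* and ALLOC_MEM_FLAGS_* constants live in the driver headers.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's flag bits. */
#define ALLOC_WRITABLE   (1u << 0)
#define ALLOC_EXECUTABLE (1u << 1)
#define ALLOC_COHERENT   (1u << 2)

#define MAP_READABLE     (1u << 0)
#define MAP_WRITEABLE    (1u << 1)
#define MAP_EXECUTABLE   (1u << 2)
#define MAP_MTYPE_UC     (1u << 3)	/* uncached */
#define MAP_MTYPE_NC     (1u << 4)	/* noncoherent, cacheable */

static uint32_t build_mapping_flags(uint32_t alloc_flags)
{
	uint32_t f = MAP_READABLE;	/* everything is at least readable */

	if (alloc_flags & ALLOC_WRITABLE)
		f |= MAP_WRITEABLE;
	if (alloc_flags & ALLOC_EXECUTABLE)
		f |= MAP_EXECUTABLE;
	/* coherent allocations must bypass the cache */
	f |= (alloc_flags & ALLOC_COHERENT) ? MAP_MTYPE_UC : MAP_MTYPE_NC;
	return f;
}

int main(void)
{
	printf("flags = 0x%x\n", build_mapping_flags(ALLOC_WRITABLE));
	return 0;
}
```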
@@ -2038,11 +2038,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 	if (adev->is_atom_fw) {
 		amdgpu_atomfirmware_scratch_regs_init(adev);
 		amdgpu_atomfirmware_allocate_fb_scratch(adev);
-		ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
-		if (ret) {
-			DRM_ERROR("Failed to get mem train fb location.\n");
-			return ret;
-		}
 	} else {
 		amdgpu_atombios_scratch_regs_init(adev);
 		amdgpu_atombios_allocate_fb_scratch(adev);
@@ -27,7 +27,6 @@
 #include "amdgpu_atomfirmware.h"
 #include "atom.h"
 #include "atombios.h"
-#include "soc15_hw_ip.h"
 
 bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
 {
@@ -121,11 +120,62 @@ union vram_info {
 	struct atom_vram_info_header_v2_3 v23;
 	struct atom_vram_info_header_v2_4 v24;
 };
-
-union vram_module {
-	struct atom_vram_module_v9 v9;
-	struct atom_vram_module_v10 v10;
-};
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	union vram_info *vram_info;
+	u32 mem_channel_number;
+	u32 mem_channel_width;
+	u8 frev, crev;
+
+	if (adev->flags & AMD_IS_APU)
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						integratedsysteminfo);
+	else
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						vram_info);
+
+	/* get any igp specific overrides */
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		if (adev->flags & AMD_IS_APU) {
+			igp_info = (union igp_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 11:
+				mem_channel_number = igp_info->v11.umachannelnumber;
+				/* channel width is 64 */
+				return mem_channel_number * 64;
+			default:
+				return 0;
+			}
+		} else {
+			vram_info = (union vram_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 3:
+				mem_channel_number = vram_info->v23.vram_module[0].channel_num;
+				mem_channel_width = vram_info->v23.vram_module[0].channel_width;
+				return mem_channel_number * (1 << mem_channel_width);
+			case 4:
+				mem_channel_number = vram_info->v24.vram_module[0].channel_num;
+				mem_channel_width = vram_info->v24.vram_module[0].channel_width;
+				return mem_channel_number * (1 << mem_channel_width);
+			default:
+				return 0;
+			}
+		}
+	}
+
+	return 0;
+}
 
 static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
 					      int atom_mem_type)
@@ -169,25 +219,19 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
 
 	return vram_type;
 }
-
-int
-amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
-				  int *vram_width, int *vram_type,
-				  int *vram_vendor)
+/*
+ * Return vram type from either integrated system info table
+ * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
+ */
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
 {
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
-	int index, i = 0;
+	int index;
 	u16 data_offset, size;
 	union igp_info *igp_info;
 	union vram_info *vram_info;
-	union vram_module *vram_module;
 	u8 frev, crev;
 	u8 mem_type;
-	u8 mem_vendor;
-	u32 mem_channel_number;
-	u32 mem_channel_width;
-	u32 module_id;
 
 	if (adev->flags & AMD_IS_APU)
 		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -195,7 +239,6 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 	else
 		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 						vram_info);
-
 	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
 					  index, &size,
 					  &frev, &crev, &data_offset)) {
@@ -204,67 +247,25 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 			(mode_info->atom_context->bios + data_offset);
 		switch (crev) {
 		case 11:
-			mem_channel_number = igp_info->v11.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
 			mem_type = igp_info->v11.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
-			break;
+			return convert_atom_mem_type_to_vram_type(adev, mem_type);
 		default:
-			return -EINVAL;
+			return 0;
 		}
 	} else {
 		vram_info = (union vram_info *)
 			(mode_info->atom_context->bios + data_offset);
-		module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
 		switch (crev) {
 		case 3:
-			if (module_id > vram_info->v23.vram_module_num)
-				module_id = 0;
-			vram_module = (union vram_module *)vram_info->v23.vram_module;
-			while (i < module_id) {
-				vram_module = (union vram_module *)
-					((u8 *)vram_module + vram_module->v9.vram_module_size);
-				i++;
-			}
-			mem_type = vram_module->v9.memory_type;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
-			mem_channel_number = vram_module->v9.channel_num;
-			mem_channel_width = vram_module->v9.channel_width;
-			if (vram_width)
-				*vram_width = mem_channel_number * (1 << mem_channel_width);
-			mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
-			if (vram_vendor)
-				*vram_vendor = mem_vendor;
-			break;
+			mem_type = vram_info->v23.vram_module[0].memory_type;
+			return convert_atom_mem_type_to_vram_type(adev, mem_type);
 		case 4:
-			if (module_id > vram_info->v24.vram_module_num)
-				module_id = 0;
-			vram_module = (union vram_module *)vram_info->v24.vram_module;
-			while (i < module_id) {
-				vram_module = (union vram_module *)
-					((u8 *)vram_module + vram_module->v10.vram_module_size);
-				i++;
-			}
-			mem_type = vram_module->v10.memory_type;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
-			mem_channel_number = vram_module->v10.channel_num;
-			mem_channel_width = vram_module->v10.channel_width;
-			if (vram_width)
-				*vram_width = mem_channel_number * (1 << mem_channel_width);
-			mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
-			if (vram_vendor)
-				*vram_vendor = mem_vendor;
-			break;
+			mem_type = vram_info->v24.vram_module[0].memory_type;
+			return convert_atom_mem_type_to_vram_type(adev, mem_type);
 		default:
-			return -EINVAL;
+			return 0;
 		}
 	}
 	}
 
 	return 0;
@@ -463,138 +464,3 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
 	}
 	return -EINVAL;
 }
-
-/*
- * Check if VBIOS supports GDDR6 training data save/restore
- */
-static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
-{
-	uint16_t data_offset;
-	int index;
-
-	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-					    firmwareinfo);
-	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
-				NULL, NULL, &data_offset)) {
-		struct atom_firmware_info_v3_1 *firmware_info =
-			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
-							   data_offset);
-
-		DRM_DEBUG("atom firmware capability:0x%08x.\n",
-			  le32_to_cpu(firmware_info->firmware_capability));
-
-		if (le32_to_cpu(firmware_info->firmware_capability) &
-		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
-			return true;
-	}
-
-	return false;
-}
-
-static int gddr6_mem_train_support(struct amdgpu_device *adev)
-{
-	int ret;
-	uint32_t major, minor, revision, hw_v;
-
-	if (gddr6_mem_train_vbios_support(adev)) {
-		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
-		hw_v = HW_REV(major, minor, revision);
-		/*
-		 * treat 0 revision as a special case since register for MP0 and MMHUB is missing
-		 * for some Navi10 A0, preventing driver from discovering the hwip information since
-		 * none of the functions will be initialized, it should not cause any problems
-		 */
-		switch (hw_v) {
-		case HW_REV(11, 0, 0):
-		case HW_REV(11, 0, 5):
-			ret = 1;
-			break;
-		default:
-			DRM_ERROR("memory training vbios supports but psp hw(%08x)"
-				  " doesn't support!\n", hw_v);
-			ret = -1;
-			break;
-		}
-	} else {
-		ret = 0;
-		hw_v = -1;
-	}
-
-	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
-	return ret;
-}
-
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
-{
-	struct atom_context *ctx = adev->mode_info.atom_context;
-	unsigned char *bios = ctx->bios;
-	struct vram_reserve_block *reserved_block;
-	int index, block_number;
-	uint8_t frev, crev;
-	uint16_t data_offset, size;
-	uint32_t start_address_in_kb;
-	uint64_t offset;
-	int ret;
-
-	adev->fw_vram_usage.mem_train_support = false;
-
-	if (adev->asic_type != CHIP_NAVI10 &&
-	    adev->asic_type != CHIP_NAVI14)
-		return 0;
-
-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
-	ret = gddr6_mem_train_support(adev);
-	if (ret == -1)
-		return -EINVAL;
-	else if (ret == 0)
-		return 0;
-
-	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-					    vram_usagebyfirmware);
-	ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
-					    &data_offset);
-	if (ret == 0) {
-		DRM_ERROR("parse data header failed.\n");
-		return -EINVAL;
-	}
-
-	DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
-		  " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
-	/* only support 2.1+ */
-	if (((uint16_t)frev << 8 | crev) < 0x0201) {
-		DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
-		return -EINVAL;
-	}
-
-	reserved_block = (struct vram_reserve_block *)
-		(bios + data_offset + sizeof(struct atom_common_table_header));
-	block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
-		/ sizeof(struct vram_reserve_block);
-	reserved_block += (block_number > 0) ? block_number-1 : 0;
-	DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
-		  block_number,
-		  le32_to_cpu(reserved_block->start_address_in_kb),
-		  le16_to_cpu(reserved_block->used_by_firmware_in_kb),
-		  le16_to_cpu(reserved_block->used_by_driver_in_kb));
-	if (reserved_block->used_by_firmware_in_kb > 0) {
-		start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
-		offset = (uint64_t)start_address_in_kb * ONE_KiB;
-		if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
-			offset -= ONE_MiB;
-		}
-
-		offset &= ~(ONE_MiB - 1);
-		adev->fw_vram_usage.mem_train_fb_loc = offset;
-		adev->fw_vram_usage.mem_train_support = true;
-		DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
-		ret = 0;
-	} else {
-		DRM_ERROR("used_by_firmware_in_kb is 0!\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
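Both the reverted amdgpu_atomfirmware_get_vram_width() and the combined query it replaces compute the bus width the same way: channel count times per-channel width, where the vram_info table stores the channel width as a log2 bit count, and the APU path assumes 64-bit channels. A tiny sketch of that arithmetic:

```c
#include <stdio.h>

/* Width in bits = number of channels times per-channel width, where the
 * table stores the channel width as a log2 value (1 << n bits). */
static int vram_width_bits(unsigned int channel_num,
			   unsigned int channel_width_log2)
{
	return channel_num * (1 << channel_width_log2);
}

int main(void)
{
	/* e.g. 8 channels of 2^5 = 32 bits each: a 256-bit bus */
	printf("%d bits\n", vram_width_bits(8, 5));
	/* APU path in the hunk above: channels * 64 */
	printf("%d bits\n", 4 * 64);
	return 0;
}
```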
@@ -29,9 +29,8 @@
 bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
-	int *vram_width, int *vram_type, int *vram_vendor);
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
 	bool d3_supported = false;
 	struct pci_dev *parent_pdev;
 
-	while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+		amdgpu_atpx_get_quirks(pdev);
+	}
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
 		vga_count++;
 
 		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
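The change above swaps one scan keyed on the 8-bit base class for two scans keyed on 16-bit base+subclass values. pci_get_class() matches on the full 24-bit PCI class code (base class, subclass, prog-if), which is why the two forms use different shifts. A sketch that just prints the codes each loop matches, using the standard PCI class constants:

```c
#include <stdio.h>

/* From the PCI spec: a class code is 24 bits,
 * (base class << 16) | (subclass << 8) | prog-if. */
#define PCI_BASE_CLASS_DISPLAY	0x03	/* 8-bit base class */
#define PCI_CLASS_DISPLAY_VGA	0x0300	/* 16-bit base+subclass */
#define PCI_CLASS_DISPLAY_OTHER	0x0380

int main(void)
{
	/* Old code: one scan that matches every display subclass. */
	printf("any display: 0x%06x\n", PCI_BASE_CLASS_DISPLAY << 16);
	/* Reverted code: two scans for the two specific subclasses. */
	printf("VGA:         0x%06x\n", PCI_CLASS_DISPLAY_VGA << 8);
	printf("other:       0x%06x\n", PCI_CLASS_DISPLAY_OTHER << 8);
	return 0;
}
```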
@@ -140,12 +140,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 	return 0;
 
 error_free:
-	for (i = 0; i < last_entry; ++i) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
-		amdgpu_bo_unref(&bo);
-	}
-	for (i = first_userptr; i < num_entries; ++i) {
+	while (i--) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
 
 		amdgpu_bo_unref(&bo);
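The reverted error path uses the classic `while (i--)` unwind idiom: after a failure at index i, it walks back over exactly the entries 0..i-1 in reverse, where the pre-revert code needed two forward loops over the regular and userptr ranges. A minimal standalone illustration (names are mine, not the driver's):

```c
#include <stdio.h>
#include <stdlib.h>

/* On a mid-loop failure, `while (i--)` frees exactly the entries that
 * were initialized, in reverse order. */
static int init_all(int **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(sizeof(int));
		if (!slots[i])
			goto error_free;
	}
	return 0;

error_free:
	while (i--)
		free(slots[i]);
	return -1;
}

int main(void)
{
	int *slots[4];
	int i;

	if (init_all(slots, 4) == 0)
		for (i = 0; i < 4; i++)
			free(slots[i]);
	return 0;
}
```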
@@ -1019,12 +1019,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 	 */
 	if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
 		struct drm_connector *list_connector;
-		struct drm_connector_list_iter iter;
 		struct amdgpu_connector *list_amdgpu_connector;
 
-		drm_connector_list_iter_begin(dev, &iter);
-		drm_for_each_connector_iter(list_connector,
-					    &iter) {
+		list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
 			if (connector == list_connector)
 				continue;
 			list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1041,7 +1037,6 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 				}
 			}
 		}
-		drm_connector_list_iter_end(&iter);
 	}
 }
 
@@ -1499,7 +1494,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector;
 	struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
 	struct drm_encoder *encoder;
@@ -1514,12 +1508,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		return;
 
 	/* see if we already added it */
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		amdgpu_connector = to_amdgpu_connector(connector);
 		if (amdgpu_connector->connector_id == connector_id) {
 			amdgpu_connector->devices |= supported_device;
-			drm_connector_list_iter_end(&iter);
 			return;
 		}
 		if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1534,7 +1526,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			}
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	/* check if it's a dp bridge */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -35,7 +35,6 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_gem.h"
-#include "amdgpu_ras.h"
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 				      struct drm_amdgpu_cs_chunk_fence *data,
@@ -450,12 +449,75 @@ retry:
 	return r;
 }
 
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+				struct amdgpu_bo *validated)
+{
+	uint32_t domain = validated->allowed_domains;
+	struct ttm_operation_ctx ctx = { true, false };
+	int r;
+
+	if (!p->evictable)
+		return false;
+
+	for (;&p->evictable->tv.head != &p->validated;
+	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+		struct amdgpu_bo_list_entry *candidate = p->evictable;
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
+		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+		bool update_bytes_moved_vis;
+		uint32_t other;
+
+		/* If we reached our current BO we can forget it */
+		if (bo == validated)
+			break;
+
+		/* We can't move pinned BOs here */
+		if (bo->pin_count)
+			continue;
+
+		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+		/* Check if this BO is in one of the domains we need space for */
+		if (!(other & domain))
+			continue;
+
+		/* Check if we can move this BO somewhere else */
+		other = bo->allowed_domains & ~domain;
+		if (!other)
+			continue;
+
+		/* Good we can try to move this BO somewhere else */
+		update_bytes_moved_vis =
+			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+			amdgpu_bo_in_cpu_visible_vram(bo);
+		amdgpu_bo_placement_from_domain(bo, other);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		p->bytes_moved += ctx.bytes_moved;
+		if (update_bytes_moved_vis)
+			p->bytes_moved_vis += ctx.bytes_moved;
+
+		if (unlikely(r))
+			break;
+
+		p->evictable = list_prev_entry(p->evictable, tv.head);
+		list_move(&candidate->tv.head, &p->validated);
+
+		return true;
+	}
+
+	return false;
+}
+
 static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 {
 	struct amdgpu_cs_parser *p = param;
 	int r;
 
-	r = amdgpu_cs_bo_validate(p, bo);
+	do {
+		r = amdgpu_cs_bo_validate(p, bo);
+	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
 	if (r)
 		return r;
 
@@ -474,6 +536,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
+		bool binding_userptr = false;
 		struct mm_struct *usermm;
 
 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -490,15 +553,21 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     lobj->user_pages);
+			binding_userptr = true;
 		}
 
+		if (p->evictable == lobj)
+			p->evictable = NULL;
+
 		r = amdgpu_cs_validate(p, bo);
 		if (r)
 			return r;
 
+		if (binding_userptr) {
 			kvfree(lobj->user_pages);
 			lobj->user_pages = NULL;
 		}
+	}
 	return 0;
 }
 
@@ -592,6 +661,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				  &p->bytes_moved_vis_threshold);
 	p->bytes_moved = 0;
 	p->bytes_moved_vis = 0;
+	p->evictable = list_last_entry(&p->validated,
+				       struct amdgpu_bo_list_entry,
+				       tv.head);
 
 	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 				      amdgpu_cs_validate, p);
@@ -843,7 +915,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_vm_update_pdes(adev, vm, false);
+	r = amdgpu_vm_update_directories(adev, vm);
 	if (r)
 		return r;
 
@@ -1287,9 +1359,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	bool reserved_buffers = false;
 	int i, r;
 
-	if (amdgpu_ras_intr_triggered())
-		return -EHWPOISON;
-
 	if (!adev->accel_working)
 		return -EBUSY;
 
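The re-added amdgpu_cs_try_evict() plugs into validation as a retry loop: validate, and on -ENOMEM ask eviction to make room, repeating as long as eviction reports progress. A toy model of that control flow, with stub functions standing in for the real validate and evict calls:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for amdgpu_cs_bo_validate() and amdgpu_cs_try_evict(): the
 * first attempt fails with -ENOMEM, eviction frees room, the retry wins. */
static int budget;

static int validate(void)
{
	return budget > 0 ? 0 : -ENOMEM;
}

static bool try_evict(void)
{
	budget++;		/* pretend we moved a BO out of the domain */
	return true;		/* eviction made progress, worth retrying */
}

int main(void)
{
	int r;

	do {
		r = validate();
	} while (r == -ENOMEM && try_evict());

	printf("validate: %d\n", r);
	return 0;
}
```

The loop terminates because try_evict() returns false once no candidate can be moved, so a persistent -ENOMEM is eventually reported to the caller.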
@@ -1077,6 +1077,7 @@ failure:
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
 
+	if (fences)
 		kfree(fences);
 
 	return 0;
@@ -1089,8 +1090,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
-				    adev->ddev->primary->debugfs_root, adev,
-				    &fops_ib_preempt);
+				    adev->ddev->primary->debugfs_root,
+				    (void *)adev, &fops_ib_preempt);
 	if (!(adev->debugfs_preempt)) {
 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
 		return -EIO;
@@ -1102,6 +1103,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 
 void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
 {
+	if (adev->debugfs_preempt)
 		debugfs_remove(adev->debugfs_preempt);
 }
 
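The two NULL checks re-added above are defensive only; as far as I know, kfree(NULL) and debugfs_remove(NULL) are both defined no-ops in kernels of this era, so behavior is unchanged either way. The same guarded-cleanup pattern in plain C:

```c
#include <stdlib.h>

/* free(NULL), like kfree(NULL), is defined to be a no-op, so the guard
 * below is belt-and-braces rather than required. */
static void cleanup(int *p)
{
	if (p)
		free(p);
}

int main(void)
{
	cleanup(NULL);			/* safe either way */
	cleanup(malloc(sizeof(int)));	/* normal path */
	return 0;
}
```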
@ -65,8 +65,6 @@
|
||||||
#include "amdgpu_ras.h"
|
#include "amdgpu_ras.h"
|
||||||
#include "amdgpu_pmu.h"
|
#include "amdgpu_pmu.h"
|
||||||
|
|
||||||
#include <linux/suspend.h>
|
|
||||||
|
|
||||||
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
|
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
|
||||||
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
|
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
|
||||||
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
|
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
|
||||||
|
@ -80,7 +78,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
|
||||||
|
|
||||||
#define AMDGPU_RESUME_MS 2000
|
#define AMDGPU_RESUME_MS 2000
|
||||||
|
|
||||||
const char *amdgpu_asic_name[] = {
|
static const char *amdgpu_asic_name[] = {
|
||||||
"TAHITI",
|
"TAHITI",
|
||||||
"PITCAIRN",
|
"PITCAIRN",
|
||||||
"VERDE",
|
"VERDE",
|
||||||
|
@ -153,36 +151,6 @@ bool amdgpu_device_is_px(struct drm_device *dev)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* VRAM access helper functions.
|
|
||||||
*
|
|
||||||
* amdgpu_device_vram_access - read/write a buffer in vram
|
|
||||||
*
|
|
||||||
* @adev: amdgpu_device pointer
|
|
||||||
* @pos: offset of the buffer in vram
|
|
||||||
* @buf: virtual address of the buffer in system memory
|
|
||||||
* @size: read/write size, sizeof(@buf) must > @size
|
|
||||||
* @write: true - write to vram, otherwise - read from vram
|
|
||||||
*/
|
|
||||||
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
|
|
||||||
uint32_t *buf, size_t size, bool write)
|
|
||||||
{
|
|
||||||
uint64_t last;
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
last = size - 4;
|
|
||||||
for (last += pos; pos <= last; pos += 4) {
|
|
||||||
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
|
|
||||||
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
|
|
||||||
WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
|
|
||||||
if (write)
|
|
||||||
WREG32_NO_KIQ(mmMM_DATA, *buf++);
|
|
||||||
else
|
|
||||||
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
|
|
||||||
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* MMIO register access helper functions.
|
* MMIO register access helper functions.
|
||||||
*/
|
*/
|
||||||
|
@ -1055,6 +1023,12 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
|
||||||
|
|
||||||
amdgpu_device_check_block_size(adev);
|
amdgpu_device_check_block_size(adev);
|
||||||
|
|
||||||
|
ret = amdgpu_device_get_job_timeout_settings(adev);
|
||||||
|
if (ret) {
|
||||||
|
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
|
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1495,9 +1469,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
|
||||||
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
|
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
|
||||||
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||||
|
|
||||||
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
|
|
||||||
goto parse_soc_bounding_box;
|
|
||||||
|
|
||||||
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
|
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
|
||||||
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
|
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
|
||||||
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
|
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
|
||||||
|
@ -1525,13 +1496,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
|
||||||
adev->gfx.config.num_packer_per_sc =
|
adev->gfx.config.num_packer_per_sc =
|
||||||
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
|
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
|
||||||
}
|
}
|
||||||
|
|
||||||
parse_soc_bounding_box:
|
|
||||||
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
|
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
|
||||||
/*
|
|
||||||
* soc bounding box info is not integrated in disocovery table,
|
|
||||||
* we always need to parse it from gpu info firmware.
|
|
||||||
*/
|
|
||||||
if (hdr->version_minor == 2) {
|
if (hdr->version_minor == 2) {
|
||||||
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
|
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
|
||||||
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
|
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
|
||||||
|
@ -1648,9 +1613,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
|
|
||||||
amdgpu_discovery_get_gfx_info(adev);
|
|
||||||
|
|
||||||
amdgpu_amdkfd_device_probe(adev);
|
amdgpu_amdkfd_device_probe(adev);
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev)) {
|
if (amdgpu_sriov_vf(adev)) {
|
||||||
|
@ -1660,7 +1622,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
|
||||||
}
|
}
|
||||||
|
|
||||||
adev->pm.pp_feature = amdgpu_pp_feature_mask;
|
adev->pm.pp_feature = amdgpu_pp_feature_mask;
|
||||||
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
|
if (amdgpu_sriov_vf(adev))
|
||||||
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
|
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
|
||||||
|
|
||||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||||
|
@ -1877,19 +1839,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
||||||
if (r)
|
if (r)
|
||||||
goto init_failed;
|
goto init_failed;
|
||||||
|
|
||||||
/*
|
|
||||||
* retired pages will be loaded from eeprom and reserved here,
|
|
||||||
* it should be called after amdgpu_device_ip_hw_init_phase2 since
|
|
||||||
* for some ASICs the RAS EEPROM code relies on SMU fully functioning
|
|
||||||
* for I2C communication which only true at this point.
|
|
||||||
* recovery_init may fail, but it can free all resources allocated by
|
|
||||||
* itself and its failure should not stop amdgpu init process.
|
|
||||||
*
|
|
||||||
* Note: theoretically, this should be called before all vram allocations
|
|
||||||
* to protect retired page from abusing
|
|
||||||
*/
|
|
||||||
amdgpu_ras_recovery_init(adev);
|
|
||||||
|
|
||||||
if (adev->gmc.xgmi.num_physical_nodes > 1)
|
if (adev->gmc.xgmi.num_physical_nodes > 1)
|
||||||
amdgpu_xgmi_add_device(adev);
|
amdgpu_xgmi_add_device(adev);
|
||||||
amdgpu_amdkfd_device_init(adev);
|
amdgpu_amdkfd_device_init(adev);
|
||||||
|
@ -2271,12 +2220,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
|
||||||
/* displays are handled in phase1 */
|
/* displays are handled in phase1 */
|
||||||
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
|
||||||
continue;
|
continue;
|
||||||
/* PSP lost connection when err_event_athub occurs */
|
|
||||||
if (amdgpu_ras_intr_triggered() &&
|
|
||||||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
|
|
||||||
adev->ip_blocks[i].status.hw = false;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
/* XXX handle errors */
|
/* XXX handle errors */
|
||||||
r = adev->ip_blocks[i].version->funcs->suspend(adev);
|
r = adev->ip_blocks[i].version->funcs->suspend(adev);
|
||||||
/* XXX handle errors */
|
/* XXX handle errors */
|
||||||
|
@ -2288,19 +2231,19 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
|
||||||
/* handle putting the SMC in the appropriate state */
|
/* handle putting the SMC in the appropriate state */
|
||||||
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
|
||||||
if (is_support_sw_smu(adev)) {
|
if (is_support_sw_smu(adev)) {
|
||||||
r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
|
/* todo */
|
||||||
} else if (adev->powerplay.pp_funcs &&
|
} else if (adev->powerplay.pp_funcs &&
|
||||||
adev->powerplay.pp_funcs->set_mp1_state) {
|
adev->powerplay.pp_funcs->set_mp1_state) {
|
||||||
r = adev->powerplay.pp_funcs->set_mp1_state(
|
r = adev->powerplay.pp_funcs->set_mp1_state(
|
||||||
adev->powerplay.pp_handle,
|
adev->powerplay.pp_handle,
|
||||||
adev->mp1_state);
|
adev->mp1_state);
|
||||||
}
|
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
|
DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
|
||||||
adev->mp1_state, r);
|
adev->mp1_state, r);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
adev->ip_blocks[i].status.hw = false;
|
adev->ip_blocks[i].status.hw = false;
|
||||||
}
|
}
|
||||||
|
@ -2613,73 +2556,6 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
|
||||||
adev->asic_reset_res, adev->ddev->unique);
|
adev->asic_reset_res, adev->ddev->unique);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
|
|
||||||
{
|
|
||||||
char *input = amdgpu_lockup_timeout;
|
|
||||||
char *timeout_setting = NULL;
|
|
||||||
int index = 0;
|
|
||||||
long timeout;
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* By default timeout for non compute jobs is 10000.
|
|
||||||
* And there is no timeout enforced on compute jobs.
|
|
||||||
* In SR-IOV or passthrough mode, timeout for compute
|
|
||||||
* jobs are 10000 by default.
|
|
||||||
*/
|
|
||||||
adev->gfx_timeout = msecs_to_jiffies(10000);
|
|
||||||
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
|
|
||||||
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
|
|
||||||
adev->compute_timeout = adev->gfx_timeout;
|
|
||||||
else
|
|
||||||
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
|
|
||||||
|
|
||||||
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
|
|
||||||
while ((timeout_setting = strsep(&input, ",")) &&
|
|
||||||
strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
|
|
||||||
ret = kstrtol(timeout_setting, 0, &timeout);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
if (timeout == 0) {
|
|
||||||
index++;
|
|
||||||
continue;
|
|
||||||
} else if (timeout < 0) {
|
|
||||||
timeout = MAX_SCHEDULE_TIMEOUT;
|
|
||||||
} else {
|
|
||||||
timeout = msecs_to_jiffies(timeout);
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (index++) {
|
|
||||||
case 0:
|
|
||||||
adev->gfx_timeout = timeout;
|
|
||||||
break;
|
|
||||||
case 1:
|
|
||||||
adev->compute_timeout = timeout;
|
|
||||||
break;
|
|
||||||
case 2:
|
|
||||||
adev->sdma_timeout = timeout;
|
|
||||||
break;
|
|
||||||
case 3:
|
|
||||||
adev->video_timeout = timeout;
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
* There is only one value specified and
|
|
||||||
* it should apply to all non-compute jobs.
|
|
||||||
*/
|
|
||||||
if (index == 1) {
|
|
||||||
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
|
|
||||||
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
|
|
||||||
adev->compute_timeout = adev->gfx_timeout;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* amdgpu_device_init - initialize the driver
|
* amdgpu_device_init - initialize the driver
|
||||||
|
@ -2707,12 +2583,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||||
adev->ddev = ddev;
|
adev->ddev = ddev;
|
||||||
adev->pdev = pdev;
|
adev->pdev = pdev;
|
||||||
adev->flags = flags;
|
adev->flags = flags;
|
||||||
|
|
||||||
if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
|
|
||||||
adev->asic_type = amdgpu_force_asic_type;
|
|
||||||
else
|
|
||||||
adev->asic_type = flags & AMD_ASIC_MASK;
|
adev->asic_type = flags & AMD_ASIC_MASK;
|
||||||
|
|
||||||
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
|
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
|
||||||
if (amdgpu_emu_mode == 1)
|
if (amdgpu_emu_mode == 1)
|
||||||
adev->usec_timeout *= 2;
|
adev->usec_timeout *= 2;
|
||||||
|
@ -2855,12 +2726,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
r = amdgpu_device_get_job_timeout_settings(adev);
|
|
||||||
if (r) {
|
|
||||||
-		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		return r;
-	}
-
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
 
@@ -3142,7 +3007,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
@@ -3165,11 +3029,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	if (!amdgpu_device_has_dc_support(adev)) {
 		/* turn off display hw */
 		drm_modeset_lock_all(dev);
-		drm_connector_list_iter_begin(dev, &iter);
-		drm_for_each_connector_iter(connector, &iter)
-			drm_helper_connector_dpms(connector,
-						  DRM_MODE_DPMS_OFF);
-		drm_connector_list_iter_end(&iter);
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		}
 		drm_modeset_unlock_all(dev);
 		/* unpin the front buffers and cursors */
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3220,11 +3082,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 */
 	amdgpu_bo_evict_vram(adev);
 
-	if (suspend) {
 	pci_save_state(dev->pdev);
+	if (suspend) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
+	} else {
+		r = amdgpu_asic_reset(adev);
+		if (r)
+			DRM_ERROR("amdgpu asic reset failed\n");
 	}
 
 	return 0;
@@ -3244,7 +3110,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_crtc *crtc;
 	int r = 0;
@@ -3315,13 +3180,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 
 		/* turn on display hw */
 		drm_modeset_lock_all(dev);
-		drm_connector_list_iter_begin(dev, &iter);
-		drm_for_each_connector_iter(connector, &iter)
-			drm_helper_connector_dpms(connector,
-						  DRM_MODE_DPMS_ON);
-		drm_connector_list_iter_end(&iter);
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		}
 
 		drm_modeset_unlock_all(dev);
 	}
 	amdgpu_fbdev_set_suspend(adev, 0);
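[Editor's note] The suspend/resume hunks above swap the drm_connector_list_iter API back to a plain list walk. For reference, the two iteration styles side by side, distilled from the hunks themselves; this is a sketch for comparison, not part of the commit:

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	/* removed style: the iterator holds its own reference per connector,
	 * so the walk tolerates concurrent connector hotplug */
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);

	/* restored style: a direct walk of the connector list, relying on
	 * the caller holding the mode_config locks */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);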
@@ -3767,6 +3628,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				break;
 			}
 		}
 
+		list_for_each_entry(tmp_adev, device_list_handle,
+				gmc.xgmi.head) {
+			amdgpu_ras_reserve_bad_pages(tmp_adev);
+		}
 	}
 }
@@ -3870,18 +3736,25 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 		adev->mp1_state = PP_MP1_STATE_NONE;
 		break;
 	}
+
+	/* Block kfd: SRIOV would do it separately */
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_amdkfd_pre_reset(adev);
 
 	return true;
 }
 
 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
+	/*unlock kfd: SRIOV would do it separately */
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_amdkfd_post_reset(adev);
 	amdgpu_vf_error_trans_all(adev);
 	adev->mp1_state = PP_MP1_STATE_NONE;
 	adev->in_gpu_reset = 0;
 	mutex_unlock(&adev->lock_reset);
 }
 
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -3901,24 +3774,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	struct amdgpu_hive_info *hive = NULL;
 	struct amdgpu_device *tmp_adev = NULL;
 	int i, r = 0;
-	bool in_ras_intr = amdgpu_ras_intr_triggered();
-
-	/*
-	 * Flush RAM to disk so that after reboot
-	 * the user can read log and see why the system rebooted.
-	 */
-	if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
-
-		DRM_WARN("Emergency reboot.");
-
-		ksys_sync_helper();
-		emergency_restart();
-	}
 
 	need_full_reset = job_signaled = false;
 	INIT_LIST_HEAD(&device_list);
 
-	dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
+	dev_info(adev->dev, "GPU reset begin!\n");
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
 
@@ -3945,16 +3805,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		return 0;
 	}
 
-	/* Block kfd: SRIOV would do it separately */
-	if (!amdgpu_sriov_vf(adev))
-		amdgpu_amdkfd_pre_reset(adev);
-
 	/* Build list of devices to reset */
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
 		if (!hive) {
-			/*unlock kfd: SRIOV would do it separately */
-			if (!amdgpu_sriov_vf(adev))
-				amdgpu_amdkfd_post_reset(adev);
 			amdgpu_device_unlock_adev(adev);
 			return -ENODEV;
 		}
@@ -3970,22 +3823,17 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		device_list_handle = &device_list;
 	}
 
-	/* block all schedulers and reset given job's ring */
-	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-		if (tmp_adev != adev) {
-			amdgpu_device_lock_adev(tmp_adev, false);
-			if (!amdgpu_sriov_vf(tmp_adev))
-				amdgpu_amdkfd_pre_reset(tmp_adev);
-		}
-
 	/*
 	 * Mark these ASICs to be reseted as untracked first
 	 * And add them back after reset completed
 	 */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
 		amdgpu_unregister_gpu_instance(tmp_adev);
 
+	/* block all schedulers and reset given job's ring */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		/* disable ras on ALL IPs */
-		if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
+		if (amdgpu_device_ip_need_full_reset(tmp_adev))
 			amdgpu_ras_suspend(tmp_adev);
 
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3995,16 +3843,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 				continue;
 
 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
-
-			if (in_ras_intr)
-				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
 		}
 	}
 
-	if (in_ras_intr)
-		goto skip_sched_resume;
-
 	/*
 	 * Must check guilty signal here since after this point all old
 	 * HW fences are force signaled.
@@ -4015,6 +3857,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	    dma_fence_is_signaled(job->base.s_fence->parent))
 		job_signaled = true;
 
+	if (!amdgpu_device_ip_need_full_reset(adev))
+		device_list_handle = &device_list;
+
 	if (job_signaled) {
 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
 		goto skip_hw_reset;
@@ -4036,6 +3881,7 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 		if (tmp_adev == adev)
 			continue;
 
+		amdgpu_device_lock_adev(tmp_adev, false);
 		r = amdgpu_device_pre_asic_reset(tmp_adev,
 						 NULL,
 						 &need_full_reset);
@@ -4063,7 +3909,6 @@ skip_hw_reset:
 
 	/* Post ASIC reset for all devs .*/
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];
 
@@ -4085,18 +3930,12 @@ skip_hw_reset:
 
 		if (r) {
 			/* bad news, how to tell it to userspace ? */
-			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 		} else {
-			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
-		}
+			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
 		}
 
-skip_sched_resume:
-	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-		/*unlock kfd: SRIOV would do it separately */
-		if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
-			amdgpu_amdkfd_post_reset(tmp_adev);
 		amdgpu_device_unlock_adev(tmp_adev);
 	}
 
@@ -134,10 +134,20 @@ static int hw_id_map[MAX_HWIP] = {
 
 static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
 {
+	uint32_t *p = (uint32_t *)binary;
 	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-	uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+	uint64_t pos = vram_size - BINARY_MAX_SIZE;
+	unsigned long flags;
 
-	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+	while (pos < vram_size) {
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+		*p++ = RREG32_NO_KIQ(mmMM_DATA);
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+		pos += 4;
+	}
+
 	return 0;
 }
@@ -169,7 +179,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
 	uint16_t checksum;
 	int r;
 
-	adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+	adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
 	if (!adev->discovery)
 		return -ENOMEM;
 
@@ -323,7 +333,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 }
 
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
-				    int *major, int *minor, int *revision)
+				    int *major, int *minor)
 {
 	struct binary_header *bhdr;
 	struct ip_discovery_header *ihdr;
@@ -359,8 +369,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
 				*major = ip->major;
 			if (minor)
 				*minor = ip->minor;
-			if (revision)
-				*revision = ip->revision;
 			return 0;
 		}
 		ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
 
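[Editor's note] The restored read loop above fetches the IP-discovery binary through the MM_INDEX/MM_DATA indirect register pair instead of the amdgpu_device_vram_access() helper. A standalone sketch of how each 4-byte read splits the VRAM byte offset across the two index registers; the helper below is hypothetical and only mirrors the arithmetic visible in the hunk:

	#include <stdint.h>
	#include <stdio.h>

	/* Low 31 bits of the byte offset go to MM_INDEX, bit 31 of MM_INDEX
	 * selects the indirect aperture, the remaining high bits go to
	 * MM_INDEX_HI -- as in the restored loop above. */
	static void split_mmio_index(uint64_t pos, uint32_t *mm_index,
				     uint32_t *mm_index_hi)
	{
		*mm_index = (uint32_t)pos | 0x80000000u;
		*mm_index_hi = (uint32_t)(pos >> 31);
	}

	int main(void)
	{
		uint32_t lo, hi;

		/* e.g. 8 GB of VRAM minus a 64 KB binary at the top */
		split_mmio_index((8ull << 30) - (64 << 10), &lo, &hi);
		printf("MM_INDEX=0x%08x MM_INDEX_HI=0x%08x\n", lo, hi);
		return 0;
	}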
@@ -24,13 +24,11 @@
 #ifndef __AMDGPU_DISCOVERY__
 #define __AMDGPU_DISCOVERY__
 
-#define DISCOVERY_TMR_SIZE  (64 << 10)
-
 int amdgpu_discovery_init(struct amdgpu_device *adev);
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
-				    int *major, int *minor, int *revision);
+				    int *major, int *minor);
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
 
 #endif /* __AMDGPU_DISCOVERY__ */
 
@@ -370,13 +370,11 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
 	struct amdgpu_connector *amdgpu_connector;
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
-	struct drm_connector_list_iter iter;
 	uint32_t devices;
 	int i = 0;
 
-	drm_connector_list_iter_begin(dev, &iter);
 	DRM_INFO("AMDGPU Display Connectors\n");
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		amdgpu_connector = to_amdgpu_connector(connector);
 		DRM_INFO("Connector %d:\n", i);
 		DRM_INFO("  %s\n", connector->name);
@@ -440,7 +438,6 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
 		}
 		i++;
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 /**
 
@@ -341,6 +341,7 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
 
 /**
  * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
  * @gobj: GEM BO
  * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
  *
 
@@ -911,8 +911,7 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
 					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL,
-					     true);
+					     !low ? &clk_freq : NULL);
 		if (ret)
 			return 0;
 		return clk_freq * 100;
@@ -929,8 +928,7 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
 					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL,
-					     true);
+					     !low ? &clk_freq : NULL);
 		if (ret)
 			return 0;
 		return clk_freq * 100;
 
@@ -298,6 +298,12 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_get_current_power_state(adev) \
 		((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
 
+#define amdgpu_smu_get_current_power_state(adev) \
+		((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
+
+#define amdgpu_smu_set_power_state(adev) \
+		((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
+
 #define amdgpu_dpm_get_pp_num_states(adev, data) \
 		((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
 
@@ -43,8 +43,6 @@
 
 #include "amdgpu_amdkfd.h"
 
-#include "amdgpu_ras.h"
-
 /*
  * KMS wrapper.
  * - 3.0.0 - initial driver
@@ -84,12 +82,13 @@
 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
- * - 3.36.0 - Allow reading more status registers on si/cik
 */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	35
 #define KMS_DRIVER_PATCHLEVEL	0
 
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH	256
+
 int amdgpu_vram_limit = 0;
 int amdgpu_vis_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
@@ -102,7 +101,7 @@ int amdgpu_disp_priority = 0;
 int amdgpu_hw_i2c = 0;
 int amdgpu_pcie_gen2 = -1;
 int amdgpu_msi = -1;
-char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
 int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
@@ -129,7 +128,11 @@ char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
 /* OverDrive(bit 14) disabled by default*/
 uint amdgpu_pp_feature_mask = 0xffffbfff;
-uint amdgpu_force_long_training = 0;
+int amdgpu_ngg = 0;
+int amdgpu_prim_buf_per_se = 0;
+int amdgpu_pos_buf_per_se = 0;
+int amdgpu_cntl_sb_buf_per_se = 0;
+int amdgpu_param_buf_per_se = 0;
 int amdgpu_job_hang_limit = 0;
 int amdgpu_lbpw = -1;
 int amdgpu_compute_multipipe = -1;
@@ -143,13 +146,12 @@ int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
 int amdgpu_noretry = 1;
-int amdgpu_force_asic_type = -1;
 
 struct amdgpu_mgpu_info mgpu_info = {
 	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
 int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xffffffff;
+uint amdgpu_ras_mask = 0xfffffffb;
 
 /**
  * DOC: vramlimit (int)
@@ -242,21 +244,16 @@ module_param_named(msi, amdgpu_msi, int, 0444);
 *
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
 * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
- *
+ * to default timeout.
 * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- *   The second one is for Compute. The third and fourth ones are
- *   for SDMA and Video.
- *
+ * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
+ *   And the third and fourth ones are for SDMA and Video.
 * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
 * jobs is 10000. And there is no timeout enforced on compute jobs.
 */
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
-		"for passthrough or sriov, 10000 for all jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
 		" 0: keep default value. negative: infinity timeout), "
-		"format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
-		"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+		"format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
 module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
 
 /**
@@ -394,14 +391,6 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
 
-/**
- * DOC: forcelongtraining (uint)
- * Force long memory training in resume.
- * The default is zero, indicates short training in resume.
- */
-MODULE_PARM_DESC(forcelongtraining, "force memory long training");
-module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
-
 /**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
@@ -459,6 +448,42 @@ MODULE_PARM_DESC(virtual_display,
 		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
+MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
+module_param_named(ngg, amdgpu_ngg, int, 0444);
+
+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
+ */
+MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
+module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+
+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
+ */
+MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
+module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+
+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
+ */
+MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
+module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+
+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
+ */
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
+module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+
 /**
 * DOC: job_hang_limit (int)
 * Set how much time allow a job hang and not drop it. The default is 0.
@@ -591,16 +616,6 @@ MODULE_PARM_DESC(noretry,
 	"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
-/**
- * DOC: force_asic_type (int)
- * A non negative value used to specify the asic type for all supported GPUs.
- */
-MODULE_PARM_DESC(force_asic_type,
-	"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
-
-
 #ifdef CONFIG_HSA_AMD
 /**
 * DOC: sched_policy (int)
@@ -1007,7 +1022,6 @@ static const struct pci_device_id pciidlist[] = {
 
 	/* Navi12 */
 	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 
 	{0, 0, 0}
 };
@@ -1113,10 +1127,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-#ifdef MODULE
-	if (THIS_MODULE->state != MODULE_STATE_GOING)
-#endif
-		DRM_ERROR("Hotplug removal is not supported\n");
+	DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
 	drm_dev_unplug(dev);
 	drm_dev_put(dev);
 	pci_disable_device(pdev);
@@ -1129,9 +1140,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (amdgpu_ras_intr_triggered())
-		return;
-
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown.
 	 * unfortunately we can't detect certain
@@ -1166,13 +1174,8 @@ static int amdgpu_pmops_resume(struct device *dev)
 static int amdgpu_pmops_freeze(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_dev->dev_private;
-	int r;
 
-	r = amdgpu_device_suspend(drm_dev, false, true);
-	if (r)
-		return r;
-	return amdgpu_asic_reset(adev);
+	return amdgpu_device_suspend(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
@@ -1344,6 +1347,66 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
 	return 0;
 }
 
+int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+	char *input = amdgpu_lockup_timeout;
+	char *timeout_setting = NULL;
+	int index = 0;
+	long timeout;
+	int ret = 0;
+
+	/*
+	 * By default timeout for non compute jobs is 10000.
+	 * And there is no timeout enforced on compute jobs.
+	 */
+	adev->gfx_timeout = msecs_to_jiffies(10000);
+	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+	adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
+		while ((timeout_setting = strsep(&input, ",")) &&
+				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
+			ret = kstrtol(timeout_setting, 0, &timeout);
+			if (ret)
+				return ret;
+
+			if (timeout == 0) {
+				index++;
+				continue;
+			} else if (timeout < 0) {
+				timeout = MAX_SCHEDULE_TIMEOUT;
+			} else {
+				timeout = msecs_to_jiffies(timeout);
+			}
+
+			switch (index++) {
+			case 0:
+				adev->gfx_timeout = timeout;
+				break;
+			case 1:
+				adev->compute_timeout = timeout;
+				break;
+			case 2:
+				adev->sdma_timeout = timeout;
+				break;
+			case 3:
+				adev->video_timeout = timeout;
+				break;
+			default:
+				break;
+			}
+		}
+		/*
+		 * There is only one value specified and
+		 * it should apply to all non-compute jobs.
+		 */
+		if (index == 1)
+			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+	}
+
+	return ret;
+}
+
 static bool
 amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
 				 bool in_vblank_irq, int *vpos, int *hpos,
 
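[Editor's note] The parser restored above splits the lockup_timeout string with strsep() and maps up to four values onto the GFX, Compute, SDMA and Video timeouts; a single value applies to all non-compute queues. A minimal userspace sketch of the same logic, assuming the semantics in the DOC comment; the values and the program itself are illustrative, not driver code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "10000,20000,15000,15000"; /* e.g. lockup_timeout=... */
		char *input = buf, *tok;
		/* defaults: 10000 ms for non-compute, no timeout (-1) for compute */
		long t[4] = { 10000, -1, 10000, 10000 };
		int index = 0;

		while ((tok = strsep(&input, ",")) && *tok && index < 4) {
			long v = strtol(tok, NULL, 0);

			if (v != 0)		/* 0 keeps the default */
				t[index] = v;	/* negative means infinity */
			index++;
		}
		if (index == 1)			/* one value: all non-compute queues */
			t[2] = t[3] = t[0];
		printf("gfx=%ld compute=%ld sdma=%ld video=%ld\n",
		       t[0], t[1], t[2], t[3]);
		return 0;
	}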
@@ -37,14 +37,12 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector;
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
 
-	drm_connector_list_iter_begin(dev, &iter);
 	/* walk the list and link encoders to connectors */
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		amdgpu_connector = to_amdgpu_connector(connector);
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -57,7 +55,6 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
 			}
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -65,10 +62,8 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
 	struct drm_device *dev = encoder->dev;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 			amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -77,7 +72,6 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
 			       amdgpu_connector->devices, encoder->encoder_type);
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 struct drm_connector *
@@ -85,20 +79,15 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-	struct drm_connector *connector, *found = NULL;
-	struct drm_connector_list_iter iter;
+	struct drm_connector *connector;
 	struct amdgpu_connector *amdgpu_connector;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		amdgpu_connector = to_amdgpu_connector(connector);
-		if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
-			found = connector;
-			break;
-		}
+		if (amdgpu_encoder->active_device & amdgpu_connector->devices)
+			return connector;
 	}
-	drm_connector_list_iter_end(&iter);
-	return found;
+	return NULL;
 }
 
 struct drm_connector *
@@ -106,20 +95,15 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-	struct drm_connector *connector, *found = NULL;
-	struct drm_connector_list_iter iter;
+	struct drm_connector *connector;
 	struct amdgpu_connector *amdgpu_connector;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		amdgpu_connector = to_amdgpu_connector(connector);
-		if (amdgpu_encoder->devices & amdgpu_connector->devices) {
-			found = connector;
-			break;
-		}
+		if (amdgpu_encoder->devices & amdgpu_connector->devices)
+			return connector;
 	}
-	drm_connector_list_iter_end(&iter);
-	return found;
+	return NULL;
 }
 
 struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
 
@@ -462,7 +462,18 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 		timeout = adev->gfx_timeout;
 		break;
 	case AMDGPU_RING_TYPE_COMPUTE:
+		/*
+		 * For non-sriov case, no timeout enforce
+		 * on compute ring by default. Unless user
+		 * specifies a timeout for compute ring.
+		 *
+		 * For sriov case, always use the timeout
+		 * as gfx ring
+		 */
+		if (!amdgpu_sriov_vf(ring->adev))
			timeout = adev->compute_timeout;
+		else
+			timeout = adev->gfx_timeout;
 		break;
 	case AMDGPU_RING_TYPE_SDMA:
 		timeout = adev->sdma_timeout;
 
@@ -527,41 +527,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 			goto error;
 	}
 
-	r = amdgpu_vm_update_pdes(adev, vm, false);
+	r = amdgpu_vm_update_directories(adev, vm);
 
 error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
-		pte_flag |= AMDGPU_PTE_EXECUTABLE;
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	if (adev->gmc.gmc_funcs->map_mtype)
-		pte_flag |= amdgpu_gmc_map_mtype(adev,
-						 flags & AMDGPU_VM_MTYPE_MASK);
-
-	return pte_flag;
-}
-
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
@@ -659,7 +631,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
 				     va_flags);
@@ -674,7 +646,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 				args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
 					     va_flags);
 
@@ -67,7 +67,6 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 
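[Editor's note] The pair of hunks above routes the VA-ioctl flag translation back through the per-ASIC get_vm_pte_flags hook (see the matching macro change in the gmc header further down). A standalone sketch of the translation the removed amdgpu_gem_va_map_flags() performed; the bit positions below are placeholders for illustration, not the real UAPI constants:

	#include <stdint.h>
	#include <stdio.h>

	/* placeholder bit positions, illustration only */
	#define VM_PAGE_READABLE	(1u << 1)
	#define VM_PAGE_WRITEABLE	(1u << 2)
	#define VM_PAGE_EXECUTABLE	(1u << 3)
	#define PTE_EXECUTABLE		(1u << 4)
	#define PTE_READABLE		(1u << 5)
	#define PTE_WRITEABLE		(1u << 6)

	/* mirror of the removed helper's branch structure */
	static uint64_t map_flags(uint32_t gem_flags)
	{
		uint64_t pte = 0;

		if (gem_flags & VM_PAGE_EXECUTABLE)
			pte |= PTE_EXECUTABLE;
		if (gem_flags & VM_PAGE_READABLE)
			pte |= PTE_READABLE;
		if (gem_flags & VM_PAGE_WRITEABLE)
			pte |= PTE_WRITEABLE;
		return pte;
	}

	int main(void)
	{
		printf("0x%llx\n", (unsigned long long)
		       map_flags(VM_PAGE_READABLE | VM_PAGE_WRITEABLE));
		return 0;
	}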
@@ -26,7 +26,6 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_rlc.h"
-#include "amdgpu_ras.h"
 
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE		msecs_to_jiffies(100)
@@ -232,10 +231,12 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
 {
-	int i, queue, me;
+	int i, queue, pipe, me;
 
 	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
 		queue = i % adev->gfx.me.num_queue_per_pipe;
+		pipe = (i / adev->gfx.me.num_queue_per_pipe)
+			% adev->gfx.me.num_pipe_per_me;
 		me = (i / adev->gfx.me.num_queue_per_pipe)
 			/ adev->gfx.me.num_pipe_per_me;
 
@@ -319,7 +320,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	return r;
 }
 
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+			      struct amdgpu_irq_src *irq)
 {
 	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
 	amdgpu_ring_fini(ring);
@@ -567,102 +569,3 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
 	mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
-
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
-{
-	int r;
-	struct ras_fs_if fs_info = {
-		.sysfs_name = "gfx_err_count",
-		.debugfs_name = "gfx_err_inject",
-	};
-	struct ras_ih_if ih_info = {
-		.cb = amdgpu_gfx_process_ras_data_cb,
-	};
-
-	if (!adev->gfx.ras_if) {
-		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-		if (!adev->gfx.ras_if)
-			return -ENOMEM;
-		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
-		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-		adev->gfx.ras_if->sub_block_index = 0;
-		strcpy(adev->gfx.ras_if->name, "gfx");
-	}
-	fs_info.head = ih_info.head = *adev->gfx.ras_if;
-
-	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
-				 &fs_info, &ih_info);
-	if (r)
-		goto free;
-
-	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
-		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
-		if (r)
-			goto late_fini;
-	} else {
-		/* free gfx ras_if if ras is not supported */
-		r = 0;
-		goto free;
-	}
-
-	return 0;
-late_fini:
-	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
-free:
-	kfree(adev->gfx.ras_if);
-	adev->gfx.ras_if = NULL;
-	return r;
-}
-
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
-{
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
-			adev->gfx.ras_if) {
-		struct ras_common_if *ras_if = adev->gfx.ras_if;
-		struct ras_ih_if ih_info = {
-			.head = *ras_if,
-			.cb = amdgpu_gfx_process_ras_data_cb,
-		};
-
-		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
-		kfree(ras_if);
-	}
-}
-
-int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
-		void *err_data,
-		struct amdgpu_iv_entry *entry)
-{
-	/* TODO ue will trigger an interrupt.
-	 *
-	 * When "Full RAS" is enabled, the per-IP interrupt sources should
-	 * be disabled and the driver should only look for the aggregated
-	 * interrupt via sync flood
-	 */
-	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
-		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-		if (adev->gfx.funcs->query_ras_error_count)
-			adev->gfx.funcs->query_ras_error_count(adev, err_data);
-		amdgpu_ras_reset_gpu(adev, 0);
-	}
-	return AMDGPU_RAS_SUCCESS;
-}
-
-int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry)
-{
-	struct ras_common_if *ras_if = adev->gfx.ras_if;
-	struct ras_dispatch_if ih_data = {
-		.entry = entry,
-	};
-
-	if (!ras_if)
-		return 0;
-
-	ih_data.head = *ras_if;
-
-	DRM_ERROR("CP ECC ERROR IRQ\n");
-	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
-	return 0;
-}
 
@@ -201,6 +201,28 @@ struct amdgpu_gfx_funcs {
 	int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
 };
 
+struct amdgpu_ngg_buf {
+	struct amdgpu_bo	*bo;
+	uint64_t		gpu_addr;
+	uint32_t		size;
+	uint32_t		bo_size;
+};
+
+enum {
+	NGG_PRIM = 0,
+	NGG_POS,
+	NGG_CNTL,
+	NGG_PARAM,
+	NGG_BUF_MAX
+};
+
+struct amdgpu_ngg {
+	struct amdgpu_ngg_buf	buf[NGG_BUF_MAX];
+	uint32_t		gds_reserve_addr;
+	uint32_t		gds_reserve_size;
+	bool			init;
+};
+
 struct sq_work {
 	struct work_struct	work;
 	unsigned		ih_data;
@@ -289,6 +311,9 @@ struct amdgpu_gfx {
 	uint32_t			grbm_soft_reset;
 	uint32_t			srbm_soft_reset;
 
+	/* NGG */
+	struct amdgpu_ngg		ngg;
+
 	/* gfx off */
 	bool				gfx_off_state; /* true: enabled, false: disabled */
 	struct mutex			gfx_off_mutex;
@@ -330,7 +355,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 			     struct amdgpu_ring *ring,
 			     struct amdgpu_irq_src *irq);
 
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+			      struct amdgpu_irq_src *irq);
 
 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
@@ -358,12 +384,5 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
 				    int pipe, int queue);
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
-int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
-		void *err_data,
-		struct amdgpu_iv_entry *entry);
-int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry);
 #endif
 
@@ -27,8 +27,6 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "amdgpu.h"
-#include "amdgpu_ras.h"
-#include "amdgpu_xgmi.h"
 
 /**
  * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -307,29 +305,3 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
 	gmc->fault_hash[hash].idx = gmc->last_fault++;
 	return false;
 }
-
-int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
-		r = adev->umc.funcs->ras_late_init(adev);
-		if (r)
-			return r;
-	}
-
-	if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
-		r = adev->mmhub.funcs->ras_late_init(adev);
-		if (r)
-			return r;
-	}
-
-	return amdgpu_xgmi_ras_late_init(adev);
-}
-
-void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
-{
-	amdgpu_umc_ras_fini(adev);
-	amdgpu_mmhub_ras_fini(adev);
-	amdgpu_xgmi_ras_fini(adev);
-}
 
@@ -99,15 +99,12 @@ struct amdgpu_gmc_funcs {
 			      unsigned pasid);
 	/* enable/disable PRT support */
 	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* map mtype to hardware flags */
-	uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
+	/* set pte flags based per asic */
+	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+				     uint32_t flags);
 	/* get the pde for a given mc addr */
 	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
 			   u64 *dst, u64 *flags);
-	/* get the pte flags to use for a BO VA mapping */
-	void (*get_vm_pte)(struct amdgpu_device *adev,
-			   struct amdgpu_bo_va_mapping *mapping,
-			   uint64_t *flags);
 };
 
 struct amdgpu_xgmi {
@@ -123,7 +120,6 @@ struct amdgpu_xgmi {
 	/* gpu list in the same hive */
 	struct list_head head;
 	bool supported;
-	struct ras_common_if *ras_if;
 };
 
 struct amdgpu_gmc {
@@ -157,7 +153,6 @@ struct amdgpu_gmc {
 	uint32_t		fw_version;
 	struct amdgpu_irq_src	vm_fault;
 	uint32_t		vram_type;
-	uint8_t			vram_vendor;
 	uint32_t		srbm_soft_reset;
 	bool			prt_warning;
 	uint64_t		stolen_size;
@@ -182,14 +177,15 @@ struct amdgpu_gmc {
 
 	struct amdgpu_xgmi xgmi;
 	struct amdgpu_irq_src	ecc_irq;
+	struct ras_common_if	*umc_ras_if;
+	struct ras_common_if	*mmhub_ras_if;
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 
 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -234,7 +230,5 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
 			     struct amdgpu_gmc *mc);
 bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
 			      uint16_t pasid, uint64_t timestamp);
-int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 
 #endif
 
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
 		updates = NULL;
 
-	if ((*id)->owner != vm->direct.fence_context ||
+	if ((*id)->owner != vm->entity.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
 	    updates || !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
-		if ((*id)->owner != vm->direct.fence_context)
+		if ((*id)->owner != vm->entity.fence_context)
 			continue;
 
 		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->direct.fence_context;
+	id->owner = vm->entity.fence_context;
 
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
 
@@ -87,13 +87,10 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 	struct drm_device *dev = adev->ddev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 
 	mutex_lock(&mode_config->mutex);
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter)
+	list_for_each_entry(connector, &mode_config->connector_list, head)
 		amdgpu_connector_hotplug(connector);
-	drm_connector_list_iter_end(&iter);
 	mutex_unlock(&mode_config->mutex);
 	/* Just fire off a uevent and let userspace tell us what to do */
 	drm_helper_hpd_irq_event(dev);
@@ -156,20 +153,6 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 	ret = amdgpu_ih_process(adev, &adev->irq.ih);
 	if (ret == IRQ_HANDLED)
 		pm_runtime_mark_last_busy(dev->dev);
 
-	/* For the hardware that cannot enable bif ring for both ras_controller_irq
-	 * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status
-	 * register to check whether the interrupt is triggered or not, and properly
-	 * ack the interrupt if it is there
-	 */
-	if (adev->nbio.funcs &&
-	    adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
-		adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
-
-	if (adev->nbio.funcs &&
-	    adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
-		adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
-
 	return ret;
 }
 
@@ -245,19 +228,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	adev->irq.msi_enabled = false;
 
 	if (amdgpu_msi_ok(adev)) {
-		int nvec = pci_msix_vec_count(adev->pdev);
-		unsigned int flags;
-
-		if (nvec <= 0) {
-			flags = PCI_IRQ_MSI;
-		} else {
-			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
-		}
-		/* we only need one vector */
-		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
-		if (nvec > 0) {
+		int ret = pci_enable_msi(adev->pdev);
+		if (!ret) {
 			adev->irq.msi_enabled = true;
-			dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+			dev_dbg(adev->dev, "amdgpu: using MSI.\n");
 		}
 	}
 
@@ -280,8 +254,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
 
 	adev->irq.installed = true;
-	/* Use vector 0 for MSI-X */
-	r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
+	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
 	if (r) {
 		adev->irq.installed = false;
 		if (!amdgpu_device_has_dc_support(adev))
@@ -396,7 +369,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
  *
  * @adev: amdgpu device pointer
- * @ih: interrupt ring instance
+ * @entry: interrupt vector pointer
  *
  * Dispatches IRQ to IP blocks.
  */
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_job *job;
-	int r = 0;
+	int r;
 
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
@@ -243,49 +243,9 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	job->fence = dma_fence_get(fence);
 
 	amdgpu_job_free_resources(job);
-
-	fence = r ? ERR_PTR(r) : fence;
 	return fence;
 }
 
-#define to_drm_sched_job(sched_job)		\
-		container_of((sched_job), struct drm_sched_job, queue_node)
-
-void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
-{
-	struct drm_sched_job *s_job;
-	struct drm_sched_entity *s_entity = NULL;
-	int i;
-
-	/* Signal all jobs not yet scheduled */
-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
-		struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-		if (!rq)
-			continue;
-
-		spin_lock(&rq->lock);
-		list_for_each_entry(s_entity, &rq->entities, list) {
-			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
-				struct drm_sched_fence *s_fence = s_job->s_fence;
-
-				dma_fence_signal(&s_fence->scheduled);
-				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
-				dma_fence_signal(&s_fence->finished);
-			}
-		}
-		spin_unlock(&rq->lock);
-	}
-
-	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *s_fence = s_job->s_fence;
-
-		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
-		dma_fence_signal(&s_fence->finished);
-	}
-}
-
 const struct drm_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
@@ -76,7 +76,4 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f);
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 			     struct dma_fence **fence);
-
-void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
-
 #endif
@@ -584,12 +584,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		struct drm_amdgpu_info_vram_gtt vram_gtt;
 
 		vram_gtt.vram_size = adev->gmc.real_vram_size -
-			atomic64_read(&adev->vram_pin_size) -
-			AMDGPU_VM_RESERVED_VRAM;
-		vram_gtt.vram_cpu_accessible_size =
-			min(adev->gmc.visible_vram_size -
-			    atomic64_read(&adev->visible_pin_size),
-			    vram_gtt.vram_size);
+			atomic64_read(&adev->vram_pin_size);
+		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+			atomic64_read(&adev->visible_pin_size);
 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
 		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -602,18 +599,15 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		memset(&mem, 0, sizeof(mem));
 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
-			atomic64_read(&adev->vram_pin_size) -
-			AMDGPU_VM_RESERVED_VRAM;
+			atomic64_read(&adev->vram_pin_size);
 		mem.vram.heap_usage =
 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
 		mem.cpu_accessible_vram.total_heap_size =
 			adev->gmc.visible_vram_size;
-		mem.cpu_accessible_vram.usable_heap_size =
-			min(adev->gmc.visible_vram_size -
-			    atomic64_read(&adev->visible_pin_size),
-			    mem.vram.usable_heap_size);
+		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+			atomic64_read(&adev->visible_pin_size);
 		mem.cpu_accessible_vram.heap_usage =
 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 		mem.cpu_accessible_vram.max_allocation =
@@ -735,6 +729,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.vce_harvest_config = adev->vce.harvest_config;
 		dev_info.gc_double_offchip_lds_buf =
 			adev->gfx.config.double_offchip_lds_buf;
+
+		if (amdgpu_ngg) {
+			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
+			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
+			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
+			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
+			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
+			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
+			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
+			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
+		}
 		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
 		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
 		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -963,12 +968,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* Ensure IB tests are run on ring */
 	flush_delayed_work(&adev->delayed_init_work);
 
-
-	if (amdgpu_ras_intr_triggered()) {
-		DRM_ERROR("RAS Intr triggered, device disabled!!");
-		return -EHWPOISON;
-	}
-
 	file_priv->driver_priv = NULL;
 
 	r = pm_runtime_get_sync(dev->dev);
@@ -23,17 +23,9 @@
 
 struct amdgpu_mmhub_funcs {
 	void (*ras_init)(struct amdgpu_device *adev);
-	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*query_ras_error_count)(struct amdgpu_device *adev,
 					void *ras_error_status);
 };
 
-struct amdgpu_mmhub {
-	struct ras_common_if *ras_if;
-	const struct amdgpu_mmhub_funcs *funcs;
-};
-
-int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
 #endif
 
@@ -136,7 +136,6 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
  * amdgpu_mn_read_lock - take the read side lock for this notifier
  *
  * @amn: our notifier
- * @blockable: is the notifier blockable
  */
 static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
@@ -342,70 +342,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 	return 0;
 }
 
-/**
- * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
- *
- * @adev: amdgpu device object
- * @offset: offset of the BO
- * @size: size of the BO
- * @domain: where to place it
- * @bo_ptr: used to initialize BOs in structures
- * @cpu_addr: optional CPU address mapping
- *
- * Creates a kernel BO at a specific offset in the address space of the domain.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-			       uint64_t offset, uint64_t size, uint32_t domain,
-			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
-{
-	struct ttm_operation_ctx ctx = { false, false };
-	unsigned int i;
-	int r;
-
-	offset &= PAGE_MASK;
-	size = ALIGN(size, PAGE_SIZE);
-
-	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
-				      NULL, cpu_addr);
-	if (r)
-		return r;
-
-	/*
-	 * Remove the original mem node and create a new one at the request
-	 * position.
-	 */
-	if (cpu_addr)
-		amdgpu_bo_kunmap(*bo_ptr);
-
-	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
-
-	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
-		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
-		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
-	}
-	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     &(*bo_ptr)->tbo.mem, &ctx);
-	if (r)
-		goto error;
-
-	if (cpu_addr) {
-		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
-		if (r)
-			goto error;
-	}
-
-	amdgpu_bo_unreserve(*bo_ptr);
-	return 0;
-
-error:
-	amdgpu_bo_unreserve(*bo_ptr);
-	amdgpu_bo_unref(bo_ptr);
-	return r;
-}
-
 /**
  * amdgpu_bo_free_kernel - free BO for kernel use
  *
@@ -515,10 +451,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 {
 	struct ttm_operation_ctx ctx = {
 		.interruptible = (bp->type != ttm_bo_type_kernel),
-		.no_wait_gpu = bp->no_wait_gpu,
+		.no_wait_gpu = false,
 		.resv = bp->resv,
-		.flags = bp->type != ttm_bo_type_kernel ?
-			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
+		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
@@ -41,7 +41,6 @@ struct amdgpu_bo_param {
 	u32				preferred_domain;
 	u64				flags;
 	enum ttm_bo_type		type;
-	bool				no_wait_gpu;
 	struct dma_resv			*resv;
 };
 
@@ -238,9 +237,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 			    unsigned long size, int align,
 			    u32 domain, struct amdgpu_bo **bo_ptr,
 			    u64 *gpu_addr, void **cpu_addr);
-int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-			       uint64_t offset, uint64_t size, uint32_t domain,
-			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 			   void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 
 	if (is_support_sw_smu(adev)) {
 		if (adev->smu.ppt_funcs->get_current_power_state)
-			pm = smu_get_current_power_state(&adev->smu);
+			pm = amdgpu_smu_get_current_power_state(adev);
 		else
 			pm = adev->pm.dpm.user_state;
 	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
@@ -805,7 +805,8 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
 }
 
 /**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
+ * pp_dpm_pcie
  *
  * The amdgpu driver provides a sysfs API for adjusting what power levels
  * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -821,15 +822,9 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
- * Secondly, enter a new value for each level by inputing a string that
+ * Secondly,Enter a new value for each level by inputing a string that
 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g.,
- *
- * .. code-block:: bash
- *
- *	echo "4 5 6" > pp_dpm_sclk
- *
- * will enable sclk levels 4, 5, and 6.
+ * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
 *
 * NOTE: change to the dcefclk max dpm level is not supported now
 */
@@ -907,7 +902,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -954,7 +949,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
@@ -994,7 +989,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
@@ -1034,7 +1029,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
@@ -1074,7 +1069,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
@@ -1114,7 +1109,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
@@ -1306,7 +1301,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	}
 	parameter[parameter_size] = profile_mode;
 	if (is_support_sw_smu(adev))
-		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
+		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
 	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 	if (!ret)
@@ -2015,7 +2010,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, true, true);
+		smu_get_power_limit(&adev->smu, &limit, true);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2033,7 +2028,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, false, true);
+		smu_get_power_limit(&adev->smu, &limit, false);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2201,9 +2196,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
 *
 * - fan1_input: fan speed in RPM
 *
- * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
 *
- * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
 *
 * hwmon interfaces for GPU clocks:
 *
@@ -2830,19 +2825,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
 		return ret;
 	}
-
-	/* Arcturus does not support standalone mclk/socclk/fclk level setting */
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_mclk.store = NULL;
-
-		dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_socclk.store = NULL;
-
-		dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_fclk.store = NULL;
-	}
 	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
 	if (ret) {
 		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -3026,8 +3008,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
 		smu_handle_task(&adev->smu,
 				smu_dpm->dpm_level,
-				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
-				true);
+				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
 	} else {
 		if (adev->powerplay.pp_funcs->dispatch_tasks) {
 			if (!amdgpu_device_has_dc_support(adev)) {
@@ -34,8 +34,6 @@
 #include "psp_v11_0.h"
 #include "psp_v12_0.h"
-
-#include "amdgpu_ras.h"
 
 static void psp_set_funcs(struct amdgpu_device *adev);
 
 static int psp_early_init(void *handle)
@@ -90,17 +88,6 @@ static int psp_sw_init(void *handle)
 		return ret;
 	}
 
-	ret = psp_mem_training_init(psp);
-	if (ret) {
-		DRM_ERROR("Failed to initialize memory training!\n");
-		return ret;
-	}
-	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
-	if (ret) {
-		DRM_ERROR("Failed to process memory training!\n");
-		return ret;
-	}
-
 	return 0;
 }
 
@@ -108,7 +95,6 @@ static int psp_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	psp_mem_training_fini(&adev->psp);
 	release_firmware(adev->psp.sos_fw);
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
@@ -165,19 +151,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		return ret;
 	}
 
-	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 	while (*((unsigned int *)psp->fence_buf) != index) {
 		if (--timeout == 0)
 			break;
-		/*
-		 * Shouldn't wait for timeout when err_event_athub occurs,
-		 * because gpu reset thread triggered and lock resource should
-		 * be released for psp resume sequence.
-		 */
-		if (amdgpu_ras_intr_triggered())
-			break;
 		msleep(1);
-		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 	}
 
 	/* In some cases, psp response status is not 0 even there is no
@@ -191,8 +168,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		if (ucode)
 			DRM_WARN("failed to load ucode id (%d) ",
 				  ucode->ucode_id);
-		DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
-			 psp->cmd_buf_mem->cmd_id,
+		DRM_WARN("psp command failed and response status is (0x%X)\n",
 			 psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
 		if (!timeout) {
 			mutex_unlock(&psp->mutex);
@@ -277,8 +253,7 @@ static int psp_tmr_init(struct psp_context *psp)
 
 	/* For ASICs support RLC autoload, psp will parse the toc
 	 * and calculate the total size of TMR needed */
-	if (!amdgpu_sriov_vf(psp->adev) &&
-	    psp->toc_start_addr &&
+	if (psp->toc_start_addr &&
 	    psp->toc_bin_size &&
 	    psp->fw_pri_buf) {
 		ret = psp_load_toc(psp, &tmr_size);
@@ -312,9 +287,15 @@ static int psp_tmr_load(struct psp_context *psp)
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 				 psp->fence_buf_mc_addr);
+	if (ret)
+		goto failed;
+
 	kfree(cmd);
 
+	return 0;
+
+failed:
+	kfree(cmd);
 	return ret;
 }
 
@@ -791,324 +772,6 @@ static int psp_ras_initialize(struct psp_context *psp)
 }
 // ras end
 
-// HDCP start
-static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					  uint64_t hdcp_ta_mc,
-					  uint64_t hdcp_mc_shared,
-					  uint32_t hdcp_ta_size,
-					  uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
-		lower_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
-		upper_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
-static int psp_hdcp_init_shared_buf(struct psp_context *psp)
-{
-	int ret;
-
-	/*
-	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
-	 * physical) for hdcp ta <-> Driver
-	 */
-	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
-				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-				      &psp->hdcp_context.hdcp_shared_bo,
-				      &psp->hdcp_context.hdcp_shared_mc_addr,
-				      &psp->hdcp_context.hdcp_shared_buf);
-
-	return ret;
-}
-
-static int psp_hdcp_load(struct psp_context *psp)
-{
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
-	       psp->ta_hdcp_ucode_size);
-
-	psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				      psp->hdcp_context.hdcp_shared_mc_addr,
-				      psp->ta_hdcp_ucode_size,
-				      PSP_HDCP_SHARED_MEM_SIZE);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	if (!ret) {
-		psp->hdcp_context.hdcp_initialized = 1;
-		psp->hdcp_context.session_id = cmd->resp.session_id;
-	}
-
-	kfree(cmd);
-
-	return ret;
-}
-static int psp_hdcp_initialize(struct psp_context *psp)
-{
-	int ret;
-
-	if (!psp->hdcp_context.hdcp_initialized) {
-		ret = psp_hdcp_init_shared_buf(psp);
-		if (ret)
-			return ret;
-	}
-
-	ret = psp_hdcp_load(psp);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
-}
-
-static int psp_hdcp_unload(struct psp_context *psp)
-{
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the unloading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t ta_cmd_id,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
-int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
-{
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-					psp->hdcp_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int psp_hdcp_terminate(struct psp_context *psp)
-{
-	int ret;
-
-	if (!psp->hdcp_context.hdcp_initialized)
-		return 0;
-
-	ret = psp_hdcp_unload(psp);
-	if (ret)
-		return ret;
-
-	psp->hdcp_context.hdcp_initialized = 0;
-
-	/* free hdcp shared memory */
-	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
-			      &psp->hdcp_context.hdcp_shared_mc_addr,
-			      &psp->hdcp_context.hdcp_shared_buf);
-
-	return 0;
-}
-// HDCP end
-
-// DTM start
-static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					 uint64_t dtm_ta_mc,
-					 uint64_t dtm_mc_shared,
-					 uint32_t dtm_ta_size,
-					 uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
-static int psp_dtm_init_shared_buf(struct psp_context *psp)
-{
-	int ret;
-
-	/*
-	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
-	 * physical) for dtm ta <-> Driver
-	 */
-	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
-				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-				      &psp->dtm_context.dtm_shared_bo,
-				      &psp->dtm_context.dtm_shared_mc_addr,
-				      &psp->dtm_context.dtm_shared_buf);
-
-	return ret;
-}
-
-static int psp_dtm_load(struct psp_context *psp)
-{
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
-
-	psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				     psp->dtm_context.dtm_shared_mc_addr,
-				     psp->ta_dtm_ucode_size,
-				     PSP_DTM_SHARED_MEM_SIZE);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	if (!ret) {
-		psp->dtm_context.dtm_initialized = 1;
-		psp->dtm_context.session_id = cmd->resp.session_id;
-	}
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int psp_dtm_initialize(struct psp_context *psp)
-{
-	int ret;
-
-	if (!psp->dtm_context.dtm_initialized) {
-		ret = psp_dtm_init_shared_buf(psp);
-		if (ret)
-			return ret;
-	}
-
-	ret = psp_dtm_load(psp);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					   uint32_t ta_cmd_id,
-					   uint32_t dtm_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
-int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
-{
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-				       psp->dtm_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int psp_dtm_terminate(struct psp_context *psp)
-{
-	int ret;
-
-	if (!psp->dtm_context.dtm_initialized)
-		return 0;
-
-	ret = psp_hdcp_unload(psp);
-	if (ret)
-		return ret;
-
-	psp->dtm_context.dtm_initialized = 0;
-
-	/* free hdcp shared memory */
-	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
-			      &psp->dtm_context.dtm_shared_mc_addr,
-			      &psp->dtm_context.dtm_shared_buf);
-
-	return 0;
-}
-// DTM end
-
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -1182,16 +845,6 @@ static int psp_hw_start(struct psp_context *psp)
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAS: Failed to initialize RAS\n");
-
-		ret = psp_hdcp_initialize(psp);
-		if (ret)
-			dev_err(psp->adev->dev,
-				"HDCP: Failed to initialize HDCP\n");
-
-		ret = psp_dtm_initialize(psp);
-		if (ret)
-			dev_err(psp->adev->dev,
-				"DTM: Failed to initialize DTM\n");
 	}
 
 	return 0;
@@ -1297,7 +950,21 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 			     struct amdgpu_firmware_info *ucode)
 {
 	struct amdgpu_device *adev = psp->adev;
-	struct common_firmware_header *hdr;
+	const struct sdma_firmware_header_v1_0 *sdma_hdr =
+		(const struct sdma_firmware_header_v1_0 *)
+		adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+	const struct gfx_firmware_header_v1_0 *ce_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	const struct gfx_firmware_header_v1_0 *pfp_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	const struct gfx_firmware_header_v1_0 *me_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+	const struct gfx_firmware_header_v1_0 *mec_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+	const struct rlc_firmware_header_v2_0 *rlc_hdr =
+		(const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+	const struct smc_firmware_header_v1_0 *smc_hdr =
+		(const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 
 	switch (ucode->ucode_id) {
 	case AMDGPU_UCODE_ID_SDMA0:
@@ -1308,33 +975,25 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 	case AMDGPU_UCODE_ID_SDMA5:
 	case AMDGPU_UCODE_ID_SDMA6:
 	case AMDGPU_UCODE_ID_SDMA7:
-		hdr = (struct common_firmware_header *)
-			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
-		amdgpu_ucode_print_sdma_hdr(hdr);
+		amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_CP_CE:
-		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
-		amdgpu_ucode_print_gfx_hdr(hdr);
+		amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_CP_PFP:
-		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
-		amdgpu_ucode_print_gfx_hdr(hdr);
+		amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_CP_ME:
-		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
-		amdgpu_ucode_print_gfx_hdr(hdr);
+		amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_CP_MEC1:
-		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
-		amdgpu_ucode_print_gfx_hdr(hdr);
+		amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_RLC_G:
-		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
-		amdgpu_ucode_print_rlc_hdr(hdr);
+		amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
 		break;
 	case AMDGPU_UCODE_ID_SMC:
-		hdr = (struct common_firmware_header *)adev->pm.fw->data;
-		amdgpu_ucode_print_smc_hdr(hdr);
+		amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
 		break;
 	default:
 		break;
@@ -1420,6 +1079,10 @@ out:
 			     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
 			/* skip mec JT when autoload is enabled */
 			continue;
+		/* Renoir only needs to load mec jump table one time */
+		if (adev->asic_type == CHIP_RENOIR &&
+		    ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
+			continue;
 
 		psp_print_fw_hdr(psp, ucode);
 
@@ -1428,8 +1091,7 @@ out:
 			return ret;
 
 		/* Start rlc autoload after psp recieved all the gfx firmware */
-		if (psp->autoload_supported && ucode->ucode_id ==
-		    AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+		if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
 			ret = psp_rlc_autoload(psp);
 			if (ret) {
 				DRM_ERROR("Failed to start rlc autoload\n");
@@ -1554,11 +1216,8 @@ static int psp_hw_fini(void *handle)
 	    psp->xgmi_context.initialized == 1)
                 psp_xgmi_terminate(psp);
 
-	if (psp->adev->psp.ta_fw) {
+	if (psp->adev->psp.ta_fw)
 		psp_ras_terminate(psp);
-		psp_dtm_terminate(psp);
-		psp_hdcp_terminate(psp);
-	}
 
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
@@ -1600,16 +1259,6 @@ static int psp_suspend(void *handle)
 			DRM_ERROR("Failed to terminate ras ta\n");
 			return ret;
 		}
-		ret = psp_hdcp_terminate(psp);
-		if (ret) {
-			DRM_ERROR("Failed to terminate hdcp ta\n");
-			return ret;
-		}
-		ret = psp_dtm_terminate(psp);
-		if (ret) {
-			DRM_ERROR("Failed to terminate dtm ta\n");
-			return ret;
-		}
 	}
 
 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1629,12 +1278,6 @@ static int psp_resume(void *handle)
 
 	DRM_INFO("PSP is resuming...\n");
 
-	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
-	if (ret) {
-		DRM_ERROR("Failed to process memory training!\n");
-		return ret;
-	}
-
 	mutex_lock(&adev->firmware.mutex);
 
 	ret = psp_hw_start(psp);
@@ -1674,6 +1317,9 @@ int psp_rlc_autoload_start(struct psp_context *psp)
 	int ret;
 	struct psp_gfx_cmd_resp *cmd;
 
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;
@ -37,9 +37,6 @@
|
||||||
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
|
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
|
||||||
#define PSP_1_MEG 0x100000
|
#define PSP_1_MEG 0x100000
|
||||||
#define PSP_TMR_SIZE 0x400000
|
#define PSP_TMR_SIZE 0x400000
|
||||||
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
|
|
||||||
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
|
|
||||||
#define PSP_SHARED_MEM_SIZE 0x4000
|
|
||||||
|
|
||||||
struct psp_context;
|
struct psp_context;
|
||||||
struct psp_xgmi_node_info;
|
struct psp_xgmi_node_info;
|
||||||
|
@ -49,8 +46,6 @@ enum psp_bootloader_cmd {
|
||||||
PSP_BL__LOAD_SYSDRV = 0x10000,
|
PSP_BL__LOAD_SYSDRV = 0x10000,
|
||||||
PSP_BL__LOAD_SOSDRV = 0x20000,
|
PSP_BL__LOAD_SOSDRV = 0x20000,
|
||||||
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
|
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
|
||||||
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
|
|
||||||
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum psp_ring_type
|
enum psp_ring_type
|
||||||
|
@ -113,9 +108,6 @@ struct psp_funcs
|
||||||
struct ta_ras_trigger_error_input *info);
|
struct ta_ras_trigger_error_input *info);
|
||||||
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
|
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
|
||||||
int (*rlc_autoload_start)(struct psp_context *psp);
|
int (*rlc_autoload_start)(struct psp_context *psp);
|
||||||
int (*mem_training_init)(struct psp_context *psp);
|
|
||||||
void (*mem_training_fini)(struct psp_context *psp);
|
|
||||||
int (*mem_training)(struct psp_context *psp, uint32_t ops);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
|
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
|
@@ -150,65 +142,6 @@ struct psp_ras_context {
 	struct amdgpu_ras *ras;
 };
 
-struct psp_hdcp_context {
-	bool			hdcp_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*hdcp_shared_bo;
-	uint64_t		hdcp_shared_mc_addr;
-	void			*hdcp_shared_buf;
-};
-
-struct psp_dtm_context {
-	bool			dtm_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*dtm_shared_bo;
-	uint64_t		dtm_shared_mc_addr;
-	void			*dtm_shared_buf;
-};
-
-#define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
-#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
-#define GDDR6_MEM_TRAINING_OFFSET		0x8000
-
-enum psp_memory_training_init_flag {
-	PSP_MEM_TRAIN_NOT_SUPPORT	= 0x0,
-	PSP_MEM_TRAIN_SUPPORT		= 0x1,
-	PSP_MEM_TRAIN_INIT_FAILED	= 0x2,
-	PSP_MEM_TRAIN_RESERVE_SUCCESS	= 0x4,
-	PSP_MEM_TRAIN_INIT_SUCCESS	= 0x8,
-};
-
-enum psp_memory_training_ops {
-	PSP_MEM_TRAIN_SEND_LONG_MSG	= 0x1,
-	PSP_MEM_TRAIN_SAVE		= 0x2,
-	PSP_MEM_TRAIN_RESTORE		= 0x4,
-	PSP_MEM_TRAIN_SEND_SHORT_MSG	= 0x8,
-	PSP_MEM_TRAIN_COLD_BOOT		= PSP_MEM_TRAIN_SEND_LONG_MSG,
-	PSP_MEM_TRAIN_RESUME		= PSP_MEM_TRAIN_SEND_SHORT_MSG,
-};
-
-struct psp_memory_training_context {
-	/*training data size*/
-	u64 train_data_size;
-	/*
-	 * sys_cache
-	 * cpu virtual address
-	 * system memory buffer that used to store the training data.
-	 */
-	void *sys_cache;
-
-	/*vram offset of the p2c training data*/
-	u64 p2c_train_data_offset;
-	struct amdgpu_bo *p2c_bo;
-
-	/*vram offset of the c2p training data*/
-	u64 c2p_train_data_offset;
-	struct amdgpu_bo *c2p_bo;
-
-	enum psp_memory_training_init_flag init;
-	u32 training_cnt;
-};
-
 struct psp_context
 {
 	struct amdgpu_device		*adev;
@@ -273,21 +206,9 @@ struct psp_context
 	uint32_t			ta_ras_ucode_version;
 	uint32_t			ta_ras_ucode_size;
 	uint8_t				*ta_ras_start_addr;
 
-	uint32_t			ta_hdcp_ucode_version;
-	uint32_t			ta_hdcp_ucode_size;
-	uint8_t				*ta_hdcp_start_addr;
-
-	uint32_t			ta_dtm_ucode_version;
-	uint32_t			ta_dtm_ucode_size;
-	uint8_t				*ta_dtm_start_addr;
-
 	struct psp_xgmi_context		xgmi_context;
 	struct psp_ras_context		ras;
-	struct psp_hdcp_context		hdcp_context;
-	struct psp_dtm_context		dtm_context;
 	struct mutex			mutex;
-	struct psp_memory_training_context mem_train_ctx;
 };
 
 struct amdgpu_psp_funcs {
@@ -330,12 +251,6 @@ struct amdgpu_psp_funcs {
 		(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
 #define psp_rlc_autoload(psp) \
 		((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
-#define psp_mem_training_init(psp) \
-	((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
-#define psp_mem_training_fini(psp) \
-	((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
-#define psp_mem_training(psp, ops) \
-	((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
-
 #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
 
@@ -364,8 +279,6 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_ras_enable_features(struct psp_context *psp,
 		union ta_ras_cmd_input *info, bool enable);
-int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 
 int psp_rlc_autoload_start(struct psp_context *psp);
 
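The psp_mem_training* wrappers removed above follow the driver's usual optional-callback pattern: each macro checks the funcs pointer before dispatching and otherwise evaluates to a harmless 0. A standalone sketch of that guard pattern follows; every name in it is an illustrative placeholder, not a driver symbol.

	/* Sketch of the optional-callback dispatch used by the psp_* macros above.
	 * All names here are hypothetical stand-ins. */
	#include <stdio.h>

	struct dev_funcs {
		int (*mem_training)(void *ctx, int ops);	/* may be NULL */
	};

	struct dev {
		struct dev_funcs *funcs;
	};

	/* Mirrors: ((psp)->funcs->mem_training ? ...((psp), (ops)) : 0) */
	#define dev_mem_training(d, ops) \
		((d)->funcs->mem_training ? (d)->funcs->mem_training((d), (ops)) : 0)

	int main(void)
	{
		struct dev_funcs funcs = { .mem_training = NULL };
		struct dev d = { .funcs = &funcs };

		/* NULL callback: the macro yields 0 instead of dereferencing NULL */
		printf("%d\n", dev_mem_training(&d, 0x1));
		return 0;
	}
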
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,13 +25,10 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
-#include <linux/reboot.h>
-#include <linux/syscalls.h>
 
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_atomfirmware.h"
-#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
 
 const char *ras_error_string[] = {
 	"none",
@@ -68,16 +65,11 @@ const char *ras_block_string[] = {
 /* inject address is 52 bits */
 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
 
-enum amdgpu_ras_retire_page_reservation {
-	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
-	AMDGPU_RAS_RETIRE_PAGE_PENDING,
-	AMDGPU_RAS_RETIRE_PAGE_FAULT,
-};
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+		uint64_t offset, uint64_t size,
+		struct amdgpu_bo **bo_ptr);
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+		struct amdgpu_bo **bo_ptr);
 
-atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
-				uint64_t addr);
-
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
@@ -197,10 +189,6 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
 
 	return 0;
 }
 
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
-		struct ras_common_if *head);
-
 /**
  * DOC: AMDGPU RAS debugfs control interface
  *
@@ -222,36 +210,29 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
  *
  * Second member: struct ras_debug_if::op.
  * It has three kinds of operations.
- *
- * - 0: disable RAS on the block. Take ::head as its data.
- * - 1: enable RAS on the block. Take ::head as its data.
- * - 2: inject errors on the block. Take ::inject as its data.
+ * 0: disable RAS on the block. Take ::head as its data.
+ * 1: enable RAS on the block. Take ::head as its data.
+ * 2: inject errors on the block. Take ::inject as its data.
  *
  * How to use the interface?
  * programs:
  * copy the struct ras_debug_if in your codes and initialize it.
  * write the struct to the control node.
  *
- * .. code-block:: bash
- *
- * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
- *
+ * bash:
+ * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
 
  * op: disable, enable, inject
  *	disable: only block is needed
  *	enable: block and error are needed
  *	inject: error, address, value are needed
- * block: umc, sdma, gfx, .........
+ * block: umc, smda, gfx, .........
  *	see ras_block_string[] for details
  * error: ue, ce
  *	ue: multi_uncorrectable
  *	ce: single_correctable
- * sub_block:
- *	sub block index, pass 0 if there is no sub block
- *
- * here are some examples for bash commands:
- *
- * .. code-block:: bash
+ * sub_block: sub block index, pass 0 if there is no sub block
  *
+ * here are some examples for bash commands,
  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
  *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
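For reference, the injection example in the DOC comment above maps onto the documented syntax field by field: op = inject, block = umc, error = ue, then sub_block, address and value (all zero here):

	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl

Per the same DOC text, a disable request needs only the block (echo disable umc > .../ras_ctrl), and an enable needs block plus error type, e.g. echo enable umc ce > .../ras_ctrl.
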
@@ -264,8 +245,7 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
  * For inject, please check corresponding err count at
  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
  *
- * .. note::
- *	Operation is only allowed on blocks which are supported.
+ * NOTE: operation is only allowed on blocks which are supported.
  *	Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
  */
 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
@@ -296,14 +276,6 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
 			break;
 		}
 
-		/* umc ce/ue error injection for a bad page is not allowed */
-		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
-		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
-			DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
-					data.inject.address);
-			break;
-		}
-
 		/* data.inject.address is offset instead of absolute gpu address */
 		ret = amdgpu_ras_error_inject(adev, &data.inject);
 		break;
@@ -318,33 +290,6 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
 	return size;
 }
 
-/**
- * DOC: AMDGPU RAS debugfs EEPROM table reset interface
- *
- * Some boards contain an EEPROM which is used to persistently store a list of
- * bad pages containing ECC errors detected in vram. This interface provides
- * a way to reset the EEPROM, e.g., after testing error injection.
- *
- * Usage:
- *
- * .. code-block:: bash
- *
- *	echo 1 > ../ras/ras_eeprom_reset
- *
- * will reset EEPROM table to 0 entries.
- *
- */
-static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
-		size_t size, loff_t *pos)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
-	int ret;
-
-	ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
-
-	return ret == 1 ? size : -EIO;
-}
-
 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
 	.owner = THIS_MODULE,
 	.read = NULL,
@@ -352,34 +297,6 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
 	.llseek = default_llseek
 };
 
-static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
-	.owner = THIS_MODULE,
-	.read = NULL,
-	.write = amdgpu_ras_debugfs_eeprom_write,
-	.llseek = default_llseek
-};
-
-/**
- * DOC: AMDGPU RAS sysfs Error Count Interface
- *
- * It allows user to read the error count for each IP block on the gpu through
- * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
- *
- * It outputs the multiple lines which report the uncorrected (ue) and corrected
- * (ce) error counts.
- *
- * The format of one line is below,
- *
- * [ce|ue]: count
- *
- * Example:
- *
- * .. code-block:: bash
- *
- *	ue: 0
- *	ce: 1
- *
- */
 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -558,7 +475,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
 		return 0;
 
-	if (!amdgpu_ras_intr_triggered()) {
 	ret = psp_ras_enable_features(&adev->psp, &info, enable);
 	if (ret) {
 		DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
@@ -569,7 +485,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 			return -EAGAIN;
 		return -EINVAL;
 	}
-	}
 
 	/* setup the obj */
 	__amdgpu_ras_feature_enable(adev, head, enable);
@@ -700,12 +615,8 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
 		break;
 	case AMDGPU_RAS_BLOCK__MMHUB:
-		if (adev->mmhub.funcs->query_ras_error_count)
-			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
-		break;
-	case AMDGPU_RAS_BLOCK__PCIE_BIF:
-		if (adev->nbio.funcs->query_ras_error_count)
-			adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+		if (adev->mmhub_funcs->query_ras_error_count)
+			adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
 		break;
 	default:
 		break;
@@ -717,14 +628,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 	info->ue_count = obj->err_data.ue_count;
 	info->ce_count = obj->err_data.ce_count;
 
-	if (err_data.ce_count) {
+	if (err_data.ce_count)
 		dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
 			 obj->err_data.ce_count, ras_block_str(info->head.block));
-	}
-	if (err_data.ue_count) {
+	if (err_data.ue_count)
 		dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
 			 obj->err_data.ue_count, ras_block_str(info->head.block));
-	}
 
 	return 0;
 }
@@ -755,8 +664,6 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_RAS_BLOCK__UMC:
 	case AMDGPU_RAS_BLOCK__MMHUB:
-	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-	case AMDGPU_RAS_BLOCK__PCIE_BIF:
 		ret = psp_ras_trigger_error(&adev->psp, &block_info);
 		break;
 	default:
@@ -816,18 +723,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
 {
 	switch (flags) {
-	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
+	case 0:
 		return "R";
-	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
+	case 1:
 		return "P";
-	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
+	case 2:
 	default:
 		return "F";
 	};
 }
 
-/**
- * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
+/*
+ * DOC: ras sysfs gpu_vram_bad_pages interface
  *
  * It allows user to read the bad pages of vram on the gpu through
  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -839,21 +746,14 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
  *
  * gpu pfn and gpu page size are printed in hex format.
  * flags can be one of below character,
- *
  * R: reserved, this gpu page is reserved and not able to use.
- *
  * P: pending for reserve, this gpu page is marked as bad, will be reserved
  *    in next window of page_reserve.
- *
  * F: unable to reserve. this gpu page can't be reserved due to some reasons.
  *
- * Examples:
- *
- * .. code-block:: bash
- *
+ * examples:
  * 0x00000001 : 0x00001000 : R
 * 0x00000002 : 0x00001000 : P
- *
 */
 
 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -1034,21 +934,8 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
 	struct drm_minor *minor = adev->ddev->primary;
 
 	con->dir = debugfs_create_dir("ras", minor->debugfs_root);
-	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+	con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_ctrl_ops);
-	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
-				adev, &amdgpu_ras_debugfs_eeprom_ops);
-
-	/*
-	 * After one uncorrectable error happens, usually GPU recovery will
-	 * be scheduled. But due to the known problem in GPU recovery failing
-	 * to bring GPU back, below interface provides one direct way to
-	 * user to reboot system automatically in such case within
-	 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
-	 * will never be called.
-	 */
-	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
-				&con->reboot);
 }
 
 void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -1093,8 +980,10 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 		amdgpu_ras_debugfs_remove(adev, &obj->head);
 	}
 
-	debugfs_remove_recursive(con->dir);
+	debugfs_remove(con->ent);
+	debugfs_remove(con->dir);
 	con->dir = NULL;
+	con->ent = NULL;
 }
 /* debugfs end */
 
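The two cleanup paths in the hunk above differ only in bookkeeping: debugfs_remove_recursive() tears down the whole "ras" directory including every file created under it, while the reverted code removes each dentry individually, which is why struct amdgpu_ras grows the extra con->ent member (see the amdgpu_ras.h hunk further below). A minimal sketch of the two styles, assuming dir and ent were created as in amdgpu_ras_debugfs_create_ctrl_node():

	/* pre-revert: one call drops ras_ctrl, ras_eeprom_reset, auto_reboot too */
	debugfs_remove_recursive(con->dir);
	con->dir = NULL;

	/* reverted: every dentry must be tracked and removed explicitly */
	debugfs_remove(con->ent);
	debugfs_remove(con->dir);
	con->dir = NULL;
	con->ent = NULL;
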
@@ -1299,15 +1188,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 
 	for (; i < data->count; i++) {
 		(*bps)[i] = (struct ras_badpage){
-			.bp = data->bps[i].retired_page,
+			.bp = data->bps[i].bp,
 			.size = AMDGPU_GPU_PAGE_SIZE,
-			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+			.flags = 0,
 		};
 
 		if (data->last_reserved <= i)
-			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
-		else if (data->bps_bo[i] == NULL)
-			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+			(*bps)[i].flags = 1;
+		else if (data->bps[i].bo == NULL)
+			(*bps)[i].flags = 2;
 	}
 
 	*count = data->count;
@@ -1325,46 +1214,105 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	atomic_set(&ras->in_recovery, 0);
 }
 
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+		struct amdgpu_bo **bo_ptr)
+{
+	/* no need to free it actually. */
+	amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
+	return 0;
+}
+
+/* reserve vram with size@offset */
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+		uint64_t offset, uint64_t size,
+		struct amdgpu_bo **bo_ptr)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct amdgpu_bo_param bp;
+	int r = 0;
+	int i;
+	struct amdgpu_bo *bo;
+
+	if (bo_ptr)
+		*bo_ptr = NULL;
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+		AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+
+	r = amdgpu_bo_create(adev, &bp, &bo);
+	if (r)
+		return -EINVAL;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (r)
+		goto error_reserve;
+
+	offset = ALIGN(offset, PAGE_SIZE);
+	for (i = 0; i < bo->placement.num_placement; ++i) {
+		bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+		bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+	}
+
+	ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+	r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
+	if (r)
+		goto error_pin;
+
+	r = amdgpu_bo_pin_restricted(bo,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			offset,
+			offset + size);
+	if (r)
+		goto error_pin;
+
+	if (bo_ptr)
+		*bo_ptr = bo;
+
+	amdgpu_bo_unreserve(bo);
+	return r;
+
+error_pin:
+	amdgpu_bo_unreserve(bo);
+error_reserve:
+	amdgpu_bo_unref(&bo);
+	return r;
+}
+
 /* alloc/realloc bps array */
 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
 		struct ras_err_handler_data *data, int pages)
 {
 	unsigned int old_space = data->count + data->space_left;
 	unsigned int new_space = old_space + pages;
-	unsigned int align_space = ALIGN(new_space, 512);
-	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-	struct amdgpu_bo **bps_bo =
-			kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+	unsigned int align_space = ALIGN(new_space, 1024);
+	void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
 
-	if (!bps || !bps_bo) {
-		kfree(bps);
-		kfree(bps_bo);
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	if (data->bps) {
-		memcpy(bps, data->bps,
+		memcpy(tmp, data->bps,
 				data->count * sizeof(*data->bps));
 		kfree(data->bps);
 	}
-	if (data->bps_bo) {
-		memcpy(bps_bo, data->bps_bo,
-				data->count * sizeof(*data->bps_bo));
-		kfree(data->bps_bo);
-	}
 
-	data->bps = bps;
-	data->bps_bo = bps_bo;
+	data->bps = tmp;
 	data->space_left += align_space - old_space;
 	return 0;
 }
 
 /* it deal with vram only. */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
-		struct eeprom_table_record *bps, int pages)
+		unsigned long *bps, int pages)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_err_handler_data *data;
+	int i = pages;
 	int ret = 0;
 
 	if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1381,120 +1329,24 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 		goto out;
 	}
 
-	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
-	data->count += pages;
+	while (i--)
+		data->bps[data->count++].bp = bps[i];
+
 	data->space_left -= pages;
 
 out:
 	mutex_unlock(&con->recovery_lock);
 
 	return ret;
 }
 
-/*
- * write error record array to eeprom, the function should be
- * protected by recovery_lock
- */
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
-	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	struct ras_err_handler_data *data;
-	struct amdgpu_ras_eeprom_control *control;
-	int save_count;
-
-	if (!con || !con->eh_data)
-		return 0;
-
-	control = &con->eeprom_control;
-	data = con->eh_data;
-	save_count = data->count - control->num_recs;
-	/* only new entries are saved */
-	if (save_count > 0)
-		if (amdgpu_ras_eeprom_process_recods(control,
-							&data->bps[control->num_recs],
-							true,
-							save_count)) {
-			DRM_ERROR("Failed to save EEPROM table data!");
-			return -EIO;
-		}
-
-	return 0;
-}
-
-/*
- * read error record array in eeprom and reserve enough space for
- * storing new bad pages
- */
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
-	struct amdgpu_ras_eeprom_control *control =
-					&adev->psp.ras.ras->eeprom_control;
-	struct eeprom_table_record *bps = NULL;
-	int ret = 0;
-
-	/* no bad page record, skip eeprom access */
-	if (!control->num_recs)
-		return ret;
-
-	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
-	if (!bps)
-		return -ENOMEM;
-
-	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
-		control->num_recs)) {
-		DRM_ERROR("Failed to load EEPROM table records!");
-		ret = -EIO;
-		goto out;
-	}
-
-	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
-
-out:
-	kfree(bps);
-	return ret;
-}
-
-/*
- * check if an address belongs to bad page
- *
- * Note: this check is only for umc block
- */
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
-				uint64_t addr)
-{
-	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	struct ras_err_handler_data *data;
-	int i;
-	bool ret = false;
-
-	if (!con || !con->eh_data)
-		return ret;
-
-	mutex_lock(&con->recovery_lock);
-	data = con->eh_data;
-	if (!data)
-		goto out;
-
-	addr >>= AMDGPU_GPU_PAGE_SHIFT;
-	for (i = 0; i < data->count; i++)
-		if (addr == data->bps[i].retired_page) {
-			ret = true;
-			goto out;
-		}
-
-out:
-	mutex_unlock(&con->recovery_lock);
-	return ret;
-}
-
 /* called in gpu recovery/init */
 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_err_handler_data *data;
 	uint64_t bp;
-	struct amdgpu_bo *bo = NULL;
-	int i, ret = 0;
+	struct amdgpu_bo *bo;
+	int i;
 
 	if (!con || !con->eh_data)
 		return 0;
@@ -1505,29 +1357,18 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
 		goto out;
 	/* reserve vram at driver post stage. */
 	for (i = data->last_reserved; i < data->count; i++) {
-		bp = data->bps[i].retired_page;
+		bp = data->bps[i].bp;
 
-		/* There are two cases of reserve error should be ignored:
-		 * 1) a ras bad page has been allocated (used by someone);
-		 * 2) a ras bad page has been reserved (duplicate error injection
-		 *    for one page);
-		 */
-		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
-					       AMDGPU_GPU_PAGE_SIZE,
-					       AMDGPU_GEM_DOMAIN_VRAM,
-					       &bo, NULL))
-			DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+		if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
+					PAGE_SIZE, &bo))
+			DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
 
-		data->bps_bo[i] = bo;
+		data->bps[i].bo = bo;
 		data->last_reserved = i + 1;
-		bo = NULL;
 	}
 
-	/* continue to save bad pages to eeprom even reesrve_vram fails */
-	ret = amdgpu_ras_save_bad_pages(adev);
 out:
 	mutex_unlock(&con->recovery_lock);
-	return ret;
+	return 0;
 }
 
 /* called when driver unload */
@@ -1547,11 +1388,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
 		goto out;
 
 	for (i = data->last_reserved - 1; i >= 0; i--) {
-		bo = data->bps_bo[i];
+		bo = data->bps[i].bo;
 
-		amdgpu_bo_free_kernel(&bo, NULL, NULL);
+		amdgpu_ras_release_vram(adev, &bo);
 
-		data->bps_bo[i] = bo;
+		data->bps[i].bo = bo;
 		data->last_reserved = i;
 	}
 out:
@@ -1559,54 +1400,41 @@ out:
 	return 0;
 }
 
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+	/* TODO
+	 * write the array to eeprom when SMU disabled.
+	 */
+	return 0;
+}
+
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+	/* TODO
+	 * read the array to eeprom when SMU disabled.
+	 */
+	return 0;
+}
+
+static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	struct ras_err_handler_data **data;
-	int ret;
+	struct ras_err_handler_data **data = &con->eh_data;
 
-	if (con)
-		data = &con->eh_data;
-	else
-		return 0;
-
-	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
-	if (!*data) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	*data = kmalloc(sizeof(**data),
+			GFP_KERNEL|__GFP_ZERO);
+	if (!*data)
+		return -ENOMEM;
 
 	mutex_init(&con->recovery_lock);
 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
 	atomic_set(&con->in_recovery, 0);
 	con->adev = adev;
 
-	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
-	if (ret)
-		goto free;
-
-	if (con->eeprom_control.num_recs) {
-		ret = amdgpu_ras_load_bad_pages(adev);
-		if (ret)
-			goto free;
-		ret = amdgpu_ras_reserve_bad_pages(adev);
-		if (ret)
-			goto release;
-	}
+	amdgpu_ras_load_bad_pages(adev);
+	amdgpu_ras_reserve_bad_pages(adev);
 
 	return 0;
-
-release:
-	amdgpu_ras_release_bad_pages(adev);
-free:
-	kfree((*data)->bps);
-	kfree((*data)->bps_bo);
-	kfree(*data);
-	con->eh_data = NULL;
-out:
-	DRM_WARN("Failed to initialize ras recovery!\n");
-
-	return ret;
 }
 
 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1614,17 +1442,13 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_err_handler_data *data = con->eh_data;
 
-	/* recovery_init failed to init it, fini is useless */
-	if (!data)
-		return 0;
-
 	cancel_work_sync(&con->recovery_work);
+	amdgpu_ras_save_bad_pages(adev);
 	amdgpu_ras_release_bad_pages(adev);
 
 	mutex_lock(&con->recovery_lock);
 	con->eh_data = NULL;
 	kfree(data->bps);
-	kfree(data->bps_bo);
 	kfree(data);
 	mutex_unlock(&con->recovery_lock);
 
@@ -1676,7 +1500,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 int amdgpu_ras_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	int r;
 
 	if (con)
 		return 0;
@@ -1704,106 +1527,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 	/* Might need get this flag from vbios. */
 	con->flags = RAS_DEFAULT_FLAGS;
 
-	if (adev->nbio.funcs->init_ras_controller_interrupt) {
-		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
-		if (r)
-			return r;
-	}
-
-	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
-		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
-		if (r)
-			return r;
-	}
+	if (amdgpu_ras_recovery_init(adev))
+		goto recovery_out;
 
 	amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
 
 	if (amdgpu_ras_fs_init(adev))
 		goto fs_out;
 
+	/* ras init for each ras block */
+	if (adev->umc.funcs->ras_init)
+		adev->umc.funcs->ras_init(adev);
+
 	DRM_INFO("RAS INFO: ras initialized successfully, "
 			"hardware ability[%x] ras_mask[%x]\n",
 			con->hw_supported, con->supported);
 	return 0;
 fs_out:
+	amdgpu_ras_recovery_fini(adev);
+recovery_out:
 	amdgpu_ras_set_context(adev, NULL);
 	kfree(con);
 
 	return -EINVAL;
 }
 
-/* helper function to handle common stuff in ip late init phase */
-int amdgpu_ras_late_init(struct amdgpu_device *adev,
-			struct ras_common_if *ras_block,
-			struct ras_fs_if *fs_info,
-			struct ras_ih_if *ih_info)
-{
-	int r;
-
-	/* disable RAS feature per IP block if it is not supported */
-	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
-		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
-		return 0;
-	}
-
-	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
-	if (r) {
-		if (r == -EAGAIN) {
-			/* request gpu reset. will run again */
-			amdgpu_ras_request_reset_on_boot(adev,
-					ras_block->block);
-			return 0;
-		} else if (adev->in_suspend || adev->in_gpu_reset) {
-			/* in resume phase, if fail to enable ras,
-			 * clean up all ras fs nodes, and disable ras */
-			goto cleanup;
-		} else
-			return r;
-	}
-
-	/* in resume phase, no need to create ras fs node */
-	if (adev->in_suspend || adev->in_gpu_reset)
-		return 0;
-
-	if (ih_info->cb) {
-		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
-		if (r)
-			goto interrupt;
-	}
-
-	amdgpu_ras_debugfs_create(adev, fs_info);
-
-	r = amdgpu_ras_sysfs_create(adev, fs_info);
-	if (r)
-		goto sysfs;
-
-	return 0;
-cleanup:
-	amdgpu_ras_sysfs_remove(adev, ras_block);
-sysfs:
-	amdgpu_ras_debugfs_remove(adev, ras_block);
-	if (ih_info->cb)
-		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
-interrupt:
-	amdgpu_ras_feature_enable(adev, ras_block, 0);
-	return r;
-}
-
-/* helper function to remove ras fs node and interrupt handler */
-void amdgpu_ras_late_fini(struct amdgpu_device *adev,
-			struct ras_common_if *ras_block,
-			struct ras_ih_if *ih_info)
-{
-	if (!ras_block || !ih_info)
-		return;
-
-	amdgpu_ras_sysfs_remove(adev, ras_block);
-	amdgpu_ras_debugfs_remove(adev, ras_block);
-	if (ih_info->cb)
-		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
-	amdgpu_ras_feature_enable(adev, ras_block, 0);
-}
-
 /* do some init work after IP late init as dependence.
  * and it runs in resume/gpu reset/booting up cases.
  */
@@ -1897,18 +1645,3 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 
 	return 0;
 }
-
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
-{
-	uint32_t hw_supported, supported;
-
-	amdgpu_ras_check_supported(adev, &hw_supported, &supported);
-	if (!hw_supported)
-		return;
-
-	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
-		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
-
-		amdgpu_ras_reset_gpu(adev, false);
-	}
-}
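Both sides of this file keep the retired-page list under con->eh_data, but the element type changes with the revert: the newer code stores eeprom_table_record entries plus a parallel bps_bo array of reservation BOs, while the reverted code packs page number and BO into one anonymous struct (see the ras_err_handler_data hunk below). A standalone sketch of the reverted layout and its flag logic, with simplified stand-in types, mirroring amdgpu_ras_badpages_read() and amdgpu_ras_badpage_flags_str() above:

	#include <stddef.h>
	#include <stdio.h>

	struct bad_page { unsigned long bp; void *bo; };	/* reverted {bp, bo} pair */

	struct eh_data {
		struct bad_page *bps;
		int count;
		int last_reserved;	/* how far reservation has progressed */
	};

	/* R = reserved, P = pending for reserve, F = reservation failed */
	static const char *flag_str(const struct eh_data *d, int i)
	{
		if (d->last_reserved <= i)
			return "P";
		if (d->bps[i].bo == NULL)
			return "F";
		return "R";
	}

	int main(void)
	{
		struct bad_page pages[2] = { { 0x1, (void *)1 }, { 0x2, NULL } };
		struct eh_data d = { pages, 2, 1 };
		int i;

		for (i = 0; i < d.count; i++)	/* same format as gpu_vram_bad_pages */
			printf("0x%08lx : %s\n", d.bps[i].bp, flag_str(&d, i));
		return 0;
	}
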
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -317,6 +317,8 @@ struct amdgpu_ras {
 	struct list_head head;
 	/* debugfs */
 	struct dentry *dir;
+	/* debugfs ctrl */
+	struct dentry *ent;
 	/* sysfs */
 	struct device_attribute features_attr;
 	struct bin_attribute badpages_attr;
@@ -332,7 +334,7 @@ struct amdgpu_ras {
 	struct mutex recovery_lock;
 
 	uint32_t flags;
-	bool reboot;
 	struct amdgpu_ras_eeprom_control eeprom_control;
 };
 
@@ -345,14 +347,15 @@ struct ras_err_data {
 	unsigned long ue_count;
 	unsigned long ce_count;
 	unsigned long err_addr_cnt;
-	struct eeprom_table_record *err_addr;
+	uint64_t *err_addr;
 };
 
 struct ras_err_handler_data {
-	/* point to bad page records array */
-	struct eeprom_table_record *bps;
-	/* point to reserved bo array */
-	struct amdgpu_bo **bps_bo;
+	/* point to bad pages array */
+	struct {
+		unsigned long bp;
+		struct amdgpu_bo *bo;
+	} *bps;
 	/* the count of entries */
 	int count;
 	/* the space can place new entries */
@@ -362,7 +365,7 @@ struct ras_err_handler_data {
 };
 
 typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
-		void *err_data,
+		struct ras_err_data *err_data,
 		struct amdgpu_iv_entry *entry);
 
 struct ras_ih_data {
@@ -478,7 +481,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
 	return ras && (ras->supported & (1 << block));
 }
 
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 		unsigned int block);
 
@@ -490,7 +492,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 
 /* error handling functions */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
-		struct eeprom_table_record *bps, int pages);
+		unsigned long *bps, int pages);
 
 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
 
@@ -499,12 +501,6 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-	/* save bad page to eeprom before gpu reset,
-	 * i2c may be unstable in gpu reset
-	 */
-	if (in_task())
-		amdgpu_ras_reserve_bad_pages(adev);
-
 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
 		schedule_work(&ras->recovery_work);
 	return 0;
@@ -570,13 +566,6 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
 int amdgpu_ras_init(struct amdgpu_device *adev);
 int amdgpu_ras_fini(struct amdgpu_device *adev);
 int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
-int amdgpu_ras_late_init(struct amdgpu_device *adev,
-			struct ras_common_if *ras_block,
-			struct ras_fs_if *fs_info,
-			struct ras_ih_if *ih_info);
-void amdgpu_ras_late_fini(struct amdgpu_device *adev,
-			struct ras_common_if *ras_block,
-			struct ras_ih_if *ih_info);
 
 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
 		struct ras_common_if *head, bool enable);
@@ -610,14 +599,4 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
 
 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
 		struct ras_dispatch_if *info);
-
-extern atomic_t amdgpu_ras_in_intr;
-
-static inline bool amdgpu_ras_intr_triggered(void)
-{
-	return !!atomic_read(&amdgpu_ras_in_intr);
-}
-
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
-
 #endif
@ -100,6 +100,171 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control);
|
||||||
|
|
||||||
|
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||||
|
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
|
||||||
|
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
|
||||||
|
struct i2c_msg msg = {
|
||||||
|
.addr = EEPROM_I2C_TARGET_ADDR,
|
||||||
|
.flags = I2C_M_RD,
|
||||||
|
.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
|
||||||
|
.buf = buff,
|
||||||
|
};
|
||||||
|
|
||||||
|
mutex_init(&control->tbl_mutex);
|
||||||
|
|
||||||
|
switch (adev->asic_type) {
|
||||||
|
case CHIP_VEGA20:
|
||||||
|
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ret) {
|
||||||
|
DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Read/Create table header from EEPROM address 0 */
|
||||||
|
ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
|
||||||
|
if (ret < 1) {
|
||||||
|
DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
__decode_table_header_from_buff(hdr, &buff[2]);
|
||||||
|
|
||||||
|
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
|
||||||
|
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
|
||||||
|
EEPROM_TABLE_RECORD_SIZE;
|
||||||
|
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
|
||||||
|
control->num_recs);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
DRM_INFO("Creating new EEPROM table");
|
||||||
|
|
||||||
|
hdr->header = EEPROM_TABLE_HDR_VAL;
|
||||||
|
hdr->version = EEPROM_TABLE_VER;
|
||||||
|
hdr->first_rec_offset = EEPROM_RECORD_START;
|
||||||
|
hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
|
||||||
|
|
||||||
|
adev->psp.ras.ras->eeprom_control.tbl_byte_sum =
|
||||||
|
__calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control);
|
||||||
|
ret = __update_table_header(control, buff);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Start inserting records from here */
|
||||||
|
adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START;
|
||||||
|
|
||||||
|
return ret == 1 ? 0 : -EIO;
|
||||||
|
}
|
||||||
|
|
||||||
|
void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
|
||||||
|
{
|
||||||
|
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||||
|
|
||||||
|
switch (adev->asic_type) {
|
||||||
|
case CHIP_VEGA20:
|
||||||
|
smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
|
||||||
|
struct eeprom_table_record *record,
|
||||||
|
unsigned char *buff)
|
||||||
|
{
|
||||||
|
__le64 tmp = 0;
|
||||||
|
int i = 0;
|
||||||
|
|
||||||
|
/* Next are all record fields according to EEPROM page spec in LE foramt */
|
||||||
|
buff[i++] = record->err_type;
|
||||||
|
|
||||||
|
buff[i++] = record->bank;
|
||||||
|
|
||||||
|
tmp = cpu_to_le64(record->ts);
|
||||||
|
memcpy(buff + i, &tmp, 8);
|
||||||
|
i += 8;
|
||||||
|
|
||||||
|
tmp = cpu_to_le64((record->offset & 0xffffffffffff));
|
||||||
|
memcpy(buff + i, &tmp, 6);
|
||||||
|
i += 6;
|
||||||
|
|
||||||
|
buff[i++] = record->mem_channel;
|
||||||
|
buff[i++] = record->mcumc_id;
|
||||||
|
|
||||||
|
tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
|
||||||
|
memcpy(buff + i, &tmp, 6);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control,
|
||||||
|
struct eeprom_table_record *record,
|
||||||
|
unsigned char *buff)
|
||||||
|
{
|
||||||
|
__le64 tmp = 0;
|
||||||
|
int i = 0;
|
||||||
|
|
||||||
|
/* Next are all record fields according to EEPROM page spec in LE foramt */
|
||||||
|
record->err_type = buff[i++];
|
||||||
|
|
||||||
|
record->bank = buff[i++];
|
||||||
|
|
||||||
|
memcpy(&tmp, buff + i, 8);
|
||||||
|
record->ts = le64_to_cpu(tmp);
|
||||||
|
i += 8;
|
||||||
|
|
||||||
|
memcpy(&tmp, buff + i, 6);
|
||||||
|
record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
|
||||||
|
i += 6;
|
||||||
|
|
||||||
|
buff[i++] = record->mem_channel;
|
||||||
|
buff[i++] = record->mcumc_id;
|
||||||
|
|
||||||
|
memcpy(&tmp, buff + i, 6);
|
||||||
|
record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* When reaching end of EEPROM memory jump back to 0 record address
|
||||||
|
* When next record access will go beyond EEPROM page boundary modify bits A17/A8
|
||||||
|
* in I2C selector to go to next page
|
||||||
|
*/
|
||||||
|
static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
|
||||||
|
{
|
||||||
|
uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE;
|
||||||
|
|
||||||
|
/* When all EEPROM memory used jump back to 0 address */
|
||||||
|
if (next_address > EEPROM_SIZE_BYTES) {
|
||||||
|
DRM_INFO("Reached end of EEPROM memory, jumping to 0 "
|
||||||
|
"and overriding old record");
|
||||||
|
return EEPROM_RECORD_START;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* To check if we overflow page boundary compare next address with
|
||||||
|
* current and see if bits 17/8 of the EEPROM address will change
|
||||||
|
* If they do start from the next 256b page
|
||||||
|
*
|
||||||
|
* https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2
|
||||||
|
*/
|
||||||
|
if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) {
|
||||||
|
DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx",
|
||||||
|
(next_address & EEPROM_ADDR_MSB_MASK));
|
||||||
|
|
||||||
|
return (next_address & EEPROM_ADDR_MSB_MASK);
|
||||||
|
}
|
||||||
|
|
||||||
|
return curr_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
|
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
|
||||||
|
@ -171,207 +336,17 @@ static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
|
|
||||||
{
|
|
||||||
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
|
|
||||||
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
mutex_lock(&control->tbl_mutex);
|
|
||||||
|
|
||||||
hdr->header = EEPROM_TABLE_HDR_VAL;
|
|
||||||
hdr->version = EEPROM_TABLE_VER;
|
|
||||||
hdr->first_rec_offset = EEPROM_RECORD_START;
|
|
||||||
hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
|
|
||||||
|
|
||||||
control->tbl_byte_sum = 0;
|
|
||||||
__update_tbl_checksum(control, NULL, 0, 0);
|
|
||||||
control->next_addr = EEPROM_RECORD_START;
|
|
||||||
|
|
||||||
ret = __update_table_header(control, buff);
|
|
||||||
|
|
||||||
mutex_unlock(&control->tbl_mutex);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
|
||||||
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
|
|
||||||
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
|
|
||||||
struct i2c_msg msg = {
|
|
||||||
.addr = EEPROM_I2C_TARGET_ADDR,
|
|
||||||
.flags = I2C_M_RD,
|
|
||||||
-        .len    = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
-        .buf    = buff,
-    };
-
-    mutex_init(&control->tbl_mutex);
-
-    switch (adev->asic_type) {
-    case CHIP_VEGA20:
-        ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
-        break;
-
-    case CHIP_ARCTURUS:
-        ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
-        break;
-
-    default:
-        return 0;
-    }
-
-    if (ret) {
-        DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
-        return ret;
-    }
-
-    /* Read/Create table header from EEPROM address 0 */
-    ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
-    if (ret < 1) {
-        DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
-        return ret;
-    }
-
-    __decode_table_header_from_buff(hdr, &buff[2]);
-
-    if (hdr->header == EEPROM_TABLE_HDR_VAL) {
-        control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
-                    EEPROM_TABLE_RECORD_SIZE;
-        control->tbl_byte_sum = __calc_hdr_byte_sum(control);
-        control->next_addr = EEPROM_RECORD_START;
-
-        DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
-                 control->num_recs);
-
-    } else {
-        DRM_INFO("Creating new EEPROM table");
-
-        ret = amdgpu_ras_eeprom_reset_table(control);
-    }
-
-    return ret == 1 ? 0 : -EIO;
-}
-
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
-    struct amdgpu_device *adev = to_amdgpu_device(control);
-
-    switch (adev->asic_type) {
-    case CHIP_VEGA20:
-        smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
-        break;
-    case CHIP_ARCTURUS:
-        smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
-        break;
-
-    default:
-        return;
-    }
-}
-
-static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
-                      struct eeprom_table_record *record,
-                      unsigned char *buff)
-{
-    __le64 tmp = 0;
-    int i = 0;
-
-    /* Next are all record fields according to EEPROM page spec in LE foramt */
-    buff[i++] = record->err_type;
-
-    buff[i++] = record->bank;
-
-    tmp = cpu_to_le64(record->ts);
-    memcpy(buff + i, &tmp, 8);
-    i += 8;
-
-    tmp = cpu_to_le64((record->offset & 0xffffffffffff));
-    memcpy(buff + i, &tmp, 6);
-    i += 6;
-
-    buff[i++] = record->mem_channel;
-    buff[i++] = record->mcumc_id;
-
-    tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
-    memcpy(buff + i, &tmp, 6);
-}
-
-static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control,
-                        struct eeprom_table_record *record,
-                        unsigned char *buff)
-{
-    __le64 tmp = 0;
-    int i = 0;
-
-    /* Next are all record fields according to EEPROM page spec in LE foramt */
-    record->err_type = buff[i++];
-
-    record->bank = buff[i++];
-
-    memcpy(&tmp, buff + i, 8);
-    record->ts = le64_to_cpu(tmp);
-    i += 8;
-
-    memcpy(&tmp, buff + i, 6);
-    record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
-    i += 6;
-
-    record->mem_channel = buff[i++];
-    record->mcumc_id = buff[i++];
-
-    memcpy(&tmp, buff + i, 6);
-    record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
-}
-
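The two helpers above serialize and parse a fixed little-endian record: one byte each for err_type and bank, an 8-byte timestamp, a 6-byte (48-bit) offset, one byte each for mem_channel and mcumc_id, and a 48-bit retired page number, 24 bytes in all. A standalone round-trip sketch of the same packing follows; the struct, its field widths, and the 24-byte total are inferred from the code above for illustration and are not the kernel's definitions.

/* Illustrative only: mirrors the LE layout above in portable C,
 * using explicit shifts instead of cpu_to_le64() + memcpy(). */
#include <stdint.h>
#include <assert.h>

typedef struct {
    uint8_t  err_type, bank, mem_channel, mcumc_id;
    uint64_t ts, offset, retired_page;  /* offset/retired_page: low 48 bits */
} rec_t;                                /* assumed stand-in, not the kernel struct */

static void pack(const rec_t *r, unsigned char *b)
{
    int i = 0;
    uint64_t t;

    b[i++] = r->err_type;
    b[i++] = r->bank;
    for (int k = 0; k < 8; k++)
        b[i++] = (unsigned char)(r->ts >> (8 * k));
    t = r->offset & 0xffffffffffffULL;          /* keep 48 bits only */
    for (int k = 0; k < 6; k++)
        b[i++] = (unsigned char)(t >> (8 * k));
    b[i++] = r->mem_channel;
    b[i++] = r->mcumc_id;
    t = r->retired_page & 0xffffffffffffULL;
    for (int k = 0; k < 6; k++)
        b[i++] = (unsigned char)(t >> (8 * k));  /* 24 bytes total */
}

static void unpack(rec_t *r, const unsigned char *b)
{
    int i = 0;

    r->err_type = b[i++];
    r->bank = b[i++];
    r->ts = 0;
    for (int k = 0; k < 8; k++)
        r->ts |= (uint64_t)b[i++] << (8 * k);
    r->offset = 0;
    for (int k = 0; k < 6; k++)
        r->offset |= (uint64_t)b[i++] << (8 * k);
    r->mem_channel = b[i++];
    r->mcumc_id = b[i++];
    r->retired_page = 0;
    for (int k = 0; k < 6; k++)
        r->retired_page |= (uint64_t)b[i++] << (8 * k);
}

int main(void)
{
    unsigned char buf[24];
    rec_t in = { 1, 2, 3, 4, 0x1122334455667788ULL,
                 0xABCDEF012345ULL, 0x0FEDCBA98765ULL };
    rec_t out;

    pack(&in, buf);
    unpack(&out, buf);
    assert(in.ts == out.ts && in.offset == out.offset &&
           in.retired_page == out.retired_page &&
           in.err_type == out.err_type && in.bank == out.bank &&
           in.mem_channel == out.mem_channel && in.mcumc_id == out.mcumc_id);
    return 0;
}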
-/*
- * When reaching end of EEPROM memory jump back to 0 record address
- * When next record access will go beyond EEPROM page boundary modify bits A17/A8
- * in I2C selector to go to next page
- */
-static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
-{
-    uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE;
-
-    /* When all EEPROM memory used jump back to 0 address */
-    if (next_address > EEPROM_SIZE_BYTES) {
-        DRM_INFO("Reached end of EEPROM memory, jumping to 0 "
-             "and overriding old record");
-        return EEPROM_RECORD_START;
-    }
-
-    /*
-     * To check if we overflow page boundary compare next address with
-     * current and see if bits 17/8 of the EEPROM address will change
-     * If they do start from the next 256b page
-     *
-     * https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2
-     */
-    if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) {
-        DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx",
-                 (next_address & EEPROM_ADDR_MSB_MASK));
-
-        return (next_address & EEPROM_ADDR_MSB_MASK);
-    }
-
-    return curr_address;
-}
-
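The page-boundary rule described by the comments above can be shown in isolation: a record must not straddle a 256-byte EEPROM page, so when the next record would change bits 17:8 of the address the writer jumps to the start of the following page, and when the part is exhausted it wraps back to the first record slot. A minimal sketch follows; the constant values are assumptions for illustration (a 256 KiB part, 24-byte records, a 20-byte header) and are not quoted from the driver headers.

/* Standalone sketch of the wrap/page-skip logic above. */
#include <stdint.h>
#include <stdio.h>

#define REC_SIZE   24u       /* assumed EEPROM_TABLE_RECORD_SIZE */
#define EEPROM_SZ  0x40000u  /* assumed EEPROM_SIZE_BYTES (256 KiB) */
#define REC_START  20u       /* assumed EEPROM_RECORD_START */
#define MSB_MASK   0x3ff00u  /* bits 17:8 select the 256-byte page */

static uint32_t next_record_addr(uint32_t cur)
{
    uint32_t next = cur + REC_SIZE;

    if (next > EEPROM_SZ)                    /* part exhausted: wrap */
        return REC_START;
    if ((cur & MSB_MASK) != (next & MSB_MASK))
        return next & MSB_MASK;              /* skip to next page start */
    return cur;                              /* record fits in this page */
}

int main(void)
{
    /* 0xF4 + 24 would cross into the next page, so jump to 0x100 */
    printf("%#x\n", next_record_addr(0xF4));  /* prints 0x100 */
    return 0;
}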
 int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
                      struct eeprom_table_record *records,
                      bool write,
                      int num)
 {
     int i, ret = 0;
-    struct i2c_msg *msgs, *msg;
-    unsigned char *buffs, *buff;
-    struct eeprom_table_record *record;
+    struct i2c_msg *msgs;
+    unsigned char *buffs;
     struct amdgpu_device *adev = to_amdgpu_device(control);
 
-    if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
+    if (adev->asic_type != CHIP_VEGA20)
         return 0;
 
     buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -398,9 +373,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
      * 256b
      */
     for (i = 0; i < num; i++) {
-        buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
-        record = &records[i];
-        msg = &msgs[i];
+        unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+        struct eeprom_table_record *record = &records[i];
+        struct i2c_msg *msg = &msgs[i];
 
         control->next_addr = __correct_eeprom_dest_address(control->next_addr);
 
@@ -440,8 +415,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
 
     if (!write) {
         for (i = 0; i < num; i++) {
-            buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
-            record = &records[i];
+            unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+            struct eeprom_table_record *record = &records[i];
 
             __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
         }
@@ -79,7 +79,6 @@ struct eeprom_table_record {
 
 int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
 void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
-int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
 
 int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
                      struct eeprom_table_record *records,
@@ -23,7 +23,6 @@
 
 #include "amdgpu.h"
 #include "amdgpu_sdma.h"
-#include "amdgpu_ras.h"
 
 #define AMDGPU_CSA_SDMA_SIZE 64
 /* SDMA CSA reside in the 3rd page of CSA */
@@ -84,101 +83,3 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
 
     return csa_mc_addr;
 }
-
-int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
-                  void *ras_ih_info)
-{
-    int r, i;
-    struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
-    struct ras_fs_if fs_info = {
-        .sysfs_name = "sdma_err_count",
-        .debugfs_name = "sdma_err_inject",
-    };
-
-    if (!ih_info)
-        return -EINVAL;
-
-    if (!adev->sdma.ras_if) {
-        adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-        if (!adev->sdma.ras_if)
-            return -ENOMEM;
-        adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
-        adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-        adev->sdma.ras_if->sub_block_index = 0;
-        strcpy(adev->sdma.ras_if->name, "sdma");
-    }
-    fs_info.head = ih_info->head = *adev->sdma.ras_if;
-
-    r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
-                 &fs_info, ih_info);
-    if (r)
-        goto free;
-
-    if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
-        for (i = 0; i < adev->sdma.num_instances; i++) {
-            r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
-                       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
-            if (r)
-                goto late_fini;
-        }
-    } else {
-        r = 0;
-        goto free;
-    }
-
-    return 0;
-
-late_fini:
-    amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
-free:
-    kfree(adev->sdma.ras_if);
-    adev->sdma.ras_if = NULL;
-    return r;
-}
-
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
-{
-    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
-        adev->sdma.ras_if) {
-        struct ras_common_if *ras_if = adev->sdma.ras_if;
-        struct ras_ih_if ih_info = {
-            .head = *ras_if,
-            /* the cb member will not be used by
-             * amdgpu_ras_interrupt_remove_handler, init it only
-             * to cheat the check in ras_late_fini
-             */
-            .cb = amdgpu_sdma_process_ras_data_cb,
-        };
-
-        amdgpu_ras_late_fini(adev, ras_if, &ih_info);
-        kfree(ras_if);
-    }
-}
-
-int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
-                    void *err_data,
-                    struct amdgpu_iv_entry *entry)
-{
-    kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-    amdgpu_ras_reset_gpu(adev, 0);
-
-    return AMDGPU_RAS_SUCCESS;
-}
-
-int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
-                struct amdgpu_irq_src *source,
-                struct amdgpu_iv_entry *entry)
-{
-    struct ras_common_if *ras_if = adev->sdma.ras_if;
-    struct ras_dispatch_if ih_data = {
-        .entry = entry,
-    };
-
-    if (!ras_if)
-        return 0;
-
-    ih_data.head = *ras_if;
-
-    amdgpu_ras_interrupt_dispatch(adev, &ih_data);
-    return 0;
-}
@@ -104,13 +104,4 @@ struct amdgpu_sdma_instance *
 amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
 int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
 uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
-int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
-                  void *ras_ih_info);
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
-int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
-                    void *err_data,
-                    struct amdgpu_iv_entry *entry);
-int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
-                struct amdgpu_irq_src *source,
-                struct amdgpu_iv_entry *entry);
 #endif
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
         __field(unsigned int, context)
         __field(unsigned int, seqno)
         __field(struct dma_fence *, fence)
-        __string(ring, to_amdgpu_ring(job->base.sched)->name)
+        __field(char *, ring_name)
         __field(u32, num_ibs)
         ),
 
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
         __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
         __entry->context = job->base.s_fence->finished.context;
         __entry->seqno = job->base.s_fence->finished.seqno;
-        __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+        __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
         __entry->num_ibs = job->num_ibs;
         ),
     TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
           __entry->sched_job_id, __get_str(timeline), __entry->context,
-          __entry->seqno, __get_str(ring), __entry->num_ibs)
+          __entry->seqno, __entry->ring_name, __entry->num_ibs)
 );
 
 TRACE_EVENT(amdgpu_sched_run_job,
 
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
         __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
         __field(unsigned int, context)
         __field(unsigned int, seqno)
-        __string(ring, to_amdgpu_ring(job->base.sched)->name)
+        __field(char *, ring_name)
         __field(u32, num_ibs)
         ),
 
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
         __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
         __entry->context = job->base.s_fence->finished.context;
         __entry->seqno = job->base.s_fence->finished.seqno;
-        __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+        __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
         __entry->num_ibs = job->num_ibs;
         ),
     TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
           __entry->sched_job_id, __get_str(timeline), __entry->context,
-          __entry->seqno, __get_str(ring), __entry->num_ibs)
+          __entry->seqno, __entry->ring_name, __entry->num_ibs)
 );
 
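The hunks above swap a copied string (the __string/__assign_str/__get_str trio) for a stored char pointer in the trace entry. The difference is when the characters are captured: the copy happens at event time, the pointer is only dereferenced at print time. A userspace analogy, not amdgpu code, with made-up struct names:

/* Sketch: capturing a string by copy vs. by pointer, as in the
 * tracepoint change above. The pointer variant is only safe when the
 * string outlives the reader - which holds here, since ring names
 * live as long as the device. */
#include <stdio.h>

struct event_copy { char name[16]; };     /* like __string/__assign_str */
struct event_ptr  { const char *name; };  /* like __field(char *, ...) */

int main(void)
{
    const char *ring = "gfx";
    struct event_copy ec;
    struct event_ptr ep;

    snprintf(ec.name, sizeof(ec.name), "%s", ring); /* data captured now */
    ep.name = ring;                                 /* dereferenced later */
    printf("copy=%s ptr=%s\n", ec.name, ep.name);
    return 0;
}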
@@ -323,15 +323,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
 
 TRACE_EVENT(amdgpu_vm_set_ptes,
     TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
-         uint32_t incr, uint64_t flags, bool direct),
-    TP_ARGS(pe, addr, count, incr, flags, direct),
+         uint32_t incr, uint64_t flags),
+    TP_ARGS(pe, addr, count, incr, flags),
     TP_STRUCT__entry(
         __field(u64, pe)
         __field(u64, addr)
         __field(u32, count)
         __field(u32, incr)
         __field(u64, flags)
-        __field(bool, direct)
         ),
 
     TP_fast_assign(
 
@@ -340,32 +339,28 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
         __entry->count = count;
         __entry->incr = incr;
         __entry->flags = flags;
-        __entry->direct = direct;
         ),
-    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
-          "direct=%d", __entry->pe, __entry->addr, __entry->incr,
-          __entry->flags, __entry->count, __entry->direct)
+    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
+          __entry->pe, __entry->addr, __entry->incr,
+          __entry->flags, __entry->count)
 );
 
 TRACE_EVENT(amdgpu_vm_copy_ptes,
-    TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
-    TP_ARGS(pe, src, count, direct),
+    TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+    TP_ARGS(pe, src, count),
     TP_STRUCT__entry(
         __field(u64, pe)
         __field(u64, src)
         __field(u32, count)
-        __field(bool, direct)
         ),
 
     TP_fast_assign(
         __entry->pe = pe;
         __entry->src = src;
         __entry->count = count;
-        __entry->direct = direct;
         ),
-    TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
-          __entry->pe, __entry->src, __entry->count,
-          __entry->direct)
+    TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+          __entry->pe, __entry->src, __entry->count)
 );
 
 TRACE_EVENT(amdgpu_vm_flush,
@@ -473,7 +468,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
     TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
     TP_ARGS(sched_job, fence),
     TP_STRUCT__entry(
-        __string(ring, sched_job->base.sched->name);
+        __field(const char *,name)
         __field(uint64_t, id)
         __field(struct dma_fence *, fence)
         __field(uint64_t, ctx)
 
@@ -481,14 +476,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
         ),
 
     TP_fast_assign(
-        __assign_str(ring, sched_job->base.sched->name)
+        __entry->name = sched_job->base.sched->name;
         __entry->id = sched_job->base.id;
         __entry->fence = fence;
         __entry->ctx = fence->context;
         __entry->seqno = fence->seqno;
         ),
     TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
-          __get_str(ring), __entry->id,
+          __entry->name, __entry->id,
           __entry->fence, __entry->ctx,
           __entry->seqno)
 );
@@ -55,7 +55,6 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_sdma.h"
-#include "amdgpu_ras.h"
 #include "bif/bif_4_1_d.h"
 
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
@@ -486,12 +485,15 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
                 struct ttm_operation_ctx *ctx,
                 struct ttm_mem_reg *new_mem)
 {
+    struct amdgpu_device *adev;
     struct ttm_mem_reg *old_mem = &bo->mem;
     struct ttm_mem_reg tmp_mem;
     struct ttm_place placements;
     struct ttm_placement placement;
     int r;
 
+    adev = amdgpu_ttm_adev(bo->bdev);
+
     /* create space/pages for new_mem in GTT space */
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
 
@@ -542,12 +544,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
                 struct ttm_operation_ctx *ctx,
                 struct ttm_mem_reg *new_mem)
 {
+    struct amdgpu_device *adev;
     struct ttm_mem_reg *old_mem = &bo->mem;
     struct ttm_mem_reg tmp_mem;
     struct ttm_placement placement;
     struct ttm_place placements;
     int r;
 
+    adev = amdgpu_ttm_adev(bo->bdev);
+
     /* make space in GTT for old_mem buffer */
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
 
@@ -1214,8 +1219,11 @@ static struct ttm_backend_func amdgpu_backend_func = {
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
                        uint32_t page_flags)
 {
+    struct amdgpu_device *adev;
     struct amdgpu_ttm_tt *gtt;
 
+    adev = amdgpu_ttm_adev(bo->bdev);
+
     gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
     if (gtt == NULL) {
         return NULL;
@@ -1648,105 +1656,81 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
  */
 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
-    uint64_t vram_size = adev->gmc.visible_vram_size;
+    struct ttm_operation_ctx ctx = { false, false };
+    struct amdgpu_bo_param bp;
+    int r = 0;
+    int i;
+    u64 vram_size = adev->gmc.visible_vram_size;
+    u64 offset = adev->fw_vram_usage.start_offset;
+    u64 size = adev->fw_vram_usage.size;
+    struct amdgpu_bo *bo;
+
+    memset(&bp, 0, sizeof(bp));
+    bp.size = adev->fw_vram_usage.size;
+    bp.byte_align = PAGE_SIZE;
+    bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+    bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+           AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+    bp.type = ttm_bo_type_kernel;
+    bp.resv = NULL;
     adev->fw_vram_usage.va = NULL;
     adev->fw_vram_usage.reserved_bo = NULL;
 
-    if (adev->fw_vram_usage.size == 0 ||
-        adev->fw_vram_usage.size > vram_size)
-        return 0;
-
-    return amdgpu_bo_create_kernel_at(adev,
-                      adev->fw_vram_usage.start_offset,
-                      adev->fw_vram_usage.size,
-                      AMDGPU_GEM_DOMAIN_VRAM,
-                      &adev->fw_vram_usage.reserved_bo,
-                      &adev->fw_vram_usage.va);
-}
+    if (adev->fw_vram_usage.size > 0 &&
+        adev->fw_vram_usage.size <= vram_size) {
+
+        r = amdgpu_bo_create(adev, &bp,
+                     &adev->fw_vram_usage.reserved_bo);
+        if (r)
+            goto error_create;
+
+        r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+        if (r)
+            goto error_reserve;
+
+        /* remove the original mem node and create a new one at the
+         * request position
+         */
+        bo = adev->fw_vram_usage.reserved_bo;
+        offset = ALIGN(offset, PAGE_SIZE);
+        for (i = 0; i < bo->placement.num_placement; ++i) {
+            bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+            bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+        }
+
+        ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+        r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+                     &bo->tbo.mem, &ctx);
+        if (r)
+            goto error_pin;
+
+        r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+            AMDGPU_GEM_DOMAIN_VRAM,
+            adev->fw_vram_usage.start_offset,
+            (adev->fw_vram_usage.start_offset +
+            adev->fw_vram_usage.size));
+        if (r)
+            goto error_pin;
+        r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+            &adev->fw_vram_usage.va);
+        if (r)
+            goto error_kmap;
+
+        amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+    }
+    return r;
+
+error_kmap:
+    amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+    amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+    amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+    adev->fw_vram_usage.va = NULL;
+    adev->fw_vram_usage.reserved_bo = NULL;
+    return r;
+}
 
-/*
- * Memoy training reservation functions
- */
-
-/**
- * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
- *
- * @adev: amdgpu_device pointer
- *
- * free memory training reserved vram if it has been reserved.
- */
-static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
-{
-    struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
-
-    ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
-    amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
-    ctx->c2p_bo = NULL;
-
-    amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
-    ctx->p2c_bo = NULL;
-
-    return 0;
-}
-
-/**
- * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from memory training.
- */
-static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
-{
-    int ret;
-    struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
-
-    memset(ctx, 0, sizeof(*ctx));
-    if (!adev->fw_vram_usage.mem_train_support) {
-        DRM_DEBUG("memory training does not support!\n");
-        return 0;
-    }
-
-    ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
-    ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
-    ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-
-    DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
-          ctx->train_data_size,
-          ctx->p2c_train_data_offset,
-          ctx->c2p_train_data_offset);
-
-    ret = amdgpu_bo_create_kernel_at(adev,
-                     ctx->p2c_train_data_offset,
-                     ctx->train_data_size,
-                     AMDGPU_GEM_DOMAIN_VRAM,
-                     &ctx->p2c_bo,
-                     NULL);
-    if (ret) {
-        DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
-        goto Err_out;
-    }
-
-    ret = amdgpu_bo_create_kernel_at(adev,
-                     ctx->c2p_train_data_offset,
-                     ctx->train_data_size,
-                     AMDGPU_GEM_DOMAIN_VRAM,
-                     &ctx->c2p_bo,
-                     NULL);
-    if (ret) {
-        DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
-        goto Err_out;
-    }
-
-    ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
-    return 0;
-
-Err_out:
-    amdgpu_ttm_training_reserve_vram_fini(adev);
-    return ret;
-}
 
 /**
  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
  * gtt/vram related fields.
@@ -1810,14 +1794,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         return r;
     }
-
-    /*
-     *The reserved vram for memory training must be pinned to the specified
-     *place on the VRAM, so reserve it early.
-     */
-    r = amdgpu_ttm_training_reserve_vram_init(adev);
-    if (r)
-        return r;
 
     /* allocate memory as required for VGA
      * This is used for VGA emulation and pre-OS scanout buffers to
      * avoid display artifacts while transitioning between pre-OS
 
@@ -1828,20 +1804,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                     NULL, &stolen_vga_buf);
     if (r)
         return r;
-
-    /*
-     * reserve one TMR (64K) memory at the top of VRAM which holds
-     * IP Discovery data and is protected by PSP.
-     */
-    r = amdgpu_bo_create_kernel_at(adev,
-                adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
-                DISCOVERY_TMR_SIZE,
-                AMDGPU_GEM_DOMAIN_VRAM,
-                &adev->discovery_memory,
-                NULL);
-    if (r)
-        return r;
 
     DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
          (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
@@ -1906,9 +1868,6 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
     void *stolen_vga_buf;
     /* return the VGA stolen memory (if any) back to VRAM */
     amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
-
-    /* return the IP Discovery TMR memory back to VRAM */
-    amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
 }
 
 /**
 
@@ -1920,7 +1879,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
         return;
 
     amdgpu_ttm_debugfs_fini(adev);
-    amdgpu_ttm_training_reserve_vram_fini(adev);
     amdgpu_ttm_fw_reserve_vram_fini(adev);
     if (adev->mman.aper_base_kaddr)
         iounmap(adev->mman.aper_base_kaddr);
 
@@ -2017,7 +1975,10 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
     *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
         AMDGPU_GPU_PAGE_SIZE;
 
-    num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+    num_dw = adev->mman.buffer_funcs->copy_num_dw;
+    while (num_dw & 0x7)
+        num_dw++;
+
     num_bytes = num_pages * 8;
 
     r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
 
@@ -2077,7 +2038,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
     max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
     num_loops = DIV_ROUND_UP(byte_count, max_bytes);
-    num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
+    num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
+
+    /* for IB padding */
+    while (num_dw & 0x7)
+        num_dw++;
 
     r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
     if (r)
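Both hunks above replace ALIGN(x, 8) with an explicit increment loop that pads num_dw up to a multiple of 8 for IB padding. For a power-of-two alignment the two forms agree on every input; a quick self-contained check follows (ALIGN_POW2 is written out the usual round-up-and-mask way as an assumption, not a quote of the kernel macro):

/* Verifies ALIGN(x, 8) == "while (x & 0x7) x++;" for 0..4095. */
#include <assert.h>
#include <stdint.h>

#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
    for (uint32_t x = 0; x < 4096; x++) {
        uint32_t a = x;
        while (a & 0x7)    /* the loop form restored by the revert */
            a++;
        assert(a == ALIGN_POW2(x, 8));
    }
    return 0;
}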
@@ -360,7 +360,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
     case CHIP_RAVEN:
     case CHIP_VEGA12:
     case CHIP_VEGA20:
-    case CHIP_ARCTURUS:
     case CHIP_RENOIR:
     case CHIP_NAVI10:
     case CHIP_NAVI14:
 
@@ -369,6 +368,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
             return AMDGPU_FW_LOAD_DIRECT;
         else
             return AMDGPU_FW_LOAD_PSP;
+    case CHIP_ARCTURUS:
+        return AMDGPU_FW_LOAD_DIRECT;
 
     default:
         DRM_ERROR("Unknown firmware load type\n");
 
@@ -108,12 +108,6 @@ struct ta_firmware_header_v1_0 {
     uint32_t ta_ras_ucode_version;
     uint32_t ta_ras_offset_bytes;
     uint32_t ta_ras_size_bytes;
-    uint32_t ta_hdcp_ucode_version;
-    uint32_t ta_hdcp_offset_bytes;
-    uint32_t ta_hdcp_size_bytes;
-    uint32_t ta_dtm_ucode_version;
-    uint32_t ta_dtm_offset_bytes;
-    uint32_t ta_dtm_size_bytes;
 };
 
 /* version_major=1, version_minor=0 */
 
@@ -54,8 +54,7 @@
     adev->umc.funcs->disable_umc_index_mode(adev);
 
 struct amdgpu_umc_funcs {
-    void (*err_cnt_init)(struct amdgpu_device *adev);
-    int (*ras_late_init)(struct amdgpu_device *adev);
+    void (*ras_init)(struct amdgpu_device *adev);
     void (*query_ras_error_count)(struct amdgpu_device *adev,
                     void *ras_error_status);
     void (*query_ras_error_address)(struct amdgpu_device *adev,
 
@@ -63,7 +62,6 @@ struct amdgpu_umc_funcs {
     void (*enable_umc_index_mode)(struct amdgpu_device *adev,
                     uint32_t umc_instance);
     void (*disable_umc_index_mode)(struct amdgpu_device *adev);
-    void (*init_registers)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_umc {
 
@@ -77,17 +75,8 @@ struct amdgpu_umc {
     uint32_t channel_offs;
     /* channel index table of interleaved memory */
    const uint32_t *channel_idx_tbl;
-    struct ras_common_if *ras_if;
 
     const struct amdgpu_umc_funcs *funcs;
 };
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
-        void *ras_error_status,
-        struct amdgpu_iv_entry *entry);
-int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
-        struct amdgpu_irq_src *source,
-        struct amdgpu_iv_entry *entry);
 #endif
 
@@ -39,8 +39,6 @@
 #include "cikd.h"
 #include "uvd/uvd_4_2_d.h"
 
-#include "amdgpu_ras.h"
-
 /* 1 second timeout */
 #define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
 
@@ -374,14 +372,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
         if (!adev->uvd.inst[j].saved_bo)
             return -ENOMEM;
 
-        /* re-write 0 since err_event_athub will corrupt VCPU buffer */
-        if (amdgpu_ras_intr_triggered()) {
-            DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
-            memset(adev->uvd.inst[j].saved_bo, 0, size);
-        } else {
             memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
         }
-    }
     return 0;
 }
 
@@ -80,11 +80,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
 MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
-static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                     struct amdgpu_bo *bo,
-                     struct dma_fence **fence);
-static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                      bool direct, struct dma_fence **fence);
 
 /**
  * amdgpu_vce_init - allocate memory, load vce firmware
 
@@ -433,15 +428,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  *
  * Open up a stream for HW test
  */
-static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                     struct amdgpu_bo *bo,
+int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                      struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 1024;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
     struct dma_fence *f = NULL;
-    uint64_t addr;
+    uint64_t dummy;
     int i, r;
 
     r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 
@@ -450,7 +444,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
     ib = &job->ibs[0];
 
-    addr = amdgpu_bo_gpu_offset(bo);
+    dummy = ib->gpu_addr + 1024;
 
     /* stitch together an VCE create msg */
     ib->length_dw = 0;
 
@@ -482,8 +476,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
     ib->ptr[ib->length_dw++] = 0x00000014; /* len */
     ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
-    ib->ptr[ib->length_dw++] = addr;
+    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+    ib->ptr[ib->length_dw++] = dummy;
     ib->ptr[ib->length_dw++] = 0x00000001;
 
     for (i = ib->length_dw; i < ib_size_dw; ++i)
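The feedback-buffer dwords above split a 64-bit GPU address across two 32-bit command-stream slots: upper_32_bits() supplies the high half, and the low half comes from plain truncation when the u64 is stored into the 32-bit ib->ptr slot. A small sketch of that split (upper_32_bits is redefined locally to mirror the kernel helper; the address value is arbitrary):

/* 64-bit address -> two 32-bit dwords, high half first. */
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
    uint64_t addr = 0x0000000123456000ULL;  /* example only */
    uint32_t ib[2];

    ib[0] = upper_32_bits(addr); /* 0x00000001 */
    ib[1] = (uint32_t)addr;      /* 0x23456000, the truncated low half */
    printf("%08x %08x\n", ib[0], ib[1]);
    return 0;
}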
@@ -513,7 +507,7 @@ err:
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                       bool direct, struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 1024;
 
@@ -1116,20 +1110,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
     long r;
 
     /* skip vce ring1/2 ib test for now, since it's not reliable */
     if (ring != &ring->adev->vce.ring[0])
         return 0;
 
-    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
-                      AMDGPU_GEM_DOMAIN_VRAM,
-                      &bo, NULL, NULL);
-    if (r)
-        return r;
-
-    r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
+    r = amdgpu_vce_get_create_msg(ring, 1, NULL);
     if (r)
         goto error;
 
@@ -1145,7 +1132,5 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
     dma_fence_put(fence);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
     return r;
 }
 
@@ -58,6 +58,10 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
+int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                  struct dma_fence **fence);
+int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+                   bool direct, struct dma_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 
@@ -569,14 +569,13 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                     struct amdgpu_bo *bo,
                      struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 16;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
     struct dma_fence *f = NULL;
-    uint64_t addr;
+    uint64_t dummy;
     int i, r;
 
     r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 
@@ -584,14 +583,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
         return r;
 
     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
+    dummy = ib->gpu_addr + 1024;
 
     ib->length_dw = 0;
     ib->ptr[ib->length_dw++] = 0x00000018;
     ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
     ib->ptr[ib->length_dw++] = handle;
-    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
-    ib->ptr[ib->length_dw++] = addr;
+    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+    ib->ptr[ib->length_dw++] = dummy;
     ib->ptr[ib->length_dw++] = 0x0000000b;
 
     ib->ptr[ib->length_dw++] = 0x00000014;
 
@@ -622,14 +621,13 @@ err:
 }
 
 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                     struct amdgpu_bo *bo,
                      struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 16;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
     struct dma_fence *f = NULL;
-    uint64_t addr;
+    uint64_t dummy;
     int i, r;
 
     r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 
@@ -637,14 +635,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
         return r;
 
     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
+    dummy = ib->gpu_addr + 1024;
 
     ib->length_dw = 0;
     ib->ptr[ib->length_dw++] = 0x00000018;
     ib->ptr[ib->length_dw++] = 0x00000001;
     ib->ptr[ib->length_dw++] = handle;
-    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
-    ib->ptr[ib->length_dw++] = addr;
+    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+    ib->ptr[ib->length_dw++] = dummy;
    ib->ptr[ib->length_dw++] = 0x0000000b;
 
     ib->ptr[ib->length_dw++] = 0x00000014;
 
@@ -677,20 +675,13 @@ err:
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
     long r;
 
-    r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-                      AMDGPU_GEM_DOMAIN_VRAM,
-                      &bo, NULL, NULL);
-    if (r)
-        return r;
-
-    r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+    r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
     if (r)
         goto error;
 
-    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
     if (r)
         goto error;
 
@@ -702,8 +693,6 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
     dma_fence_put(fence);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
     return r;
 }
 
 
@@ -130,8 +130,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
 
     if (level == adev->vm_manager.root_level)
         /* For the root directory */
-        return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
-            >> shift;
+        return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
     else if (level != AMDGPU_VM_PTB)
         /* Everything in between */
         return 512;
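The expression joined onto one line above computes how many root-directory entries cover max_pfn: round max_pfn up to a whole multiple of 2^shift, then divide by 2^shift, i.e. a ceiling division. A worked example with assumed numbers:

/* Ceiling division via round_up: 1000 pages at 512 entries per
 * block need 2 root entries. round_up is written out here the way
 * power-of-two round-up masks usually are. */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_pow2(uint64_t x, uint64_t a) /* a == 1 << shift */
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    uint64_t max_pfn = 1000, shift = 9;
    uint64_t entries = round_up_pow2(max_pfn, 1ULL << shift) >> shift;
    printf("%llu\n", (unsigned long long)entries);  /* prints 2 */
    return 0;
}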
@@ -342,7 +341,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
     return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
 }
 
-/*
+/**
  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
  */
 struct amdgpu_vm_pt_cursor {
 
@@ -483,7 +482,6 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device structure
  * @vm: amdgpu_vm structure
- * @start: optional cursor to start with
  * @cursor: state to initialize
  *
  * Starts a deep first traversal of the PD/PT tree.
 
@@ -537,7 +535,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
         amdgpu_vm_pt_ancestor(cursor);
 }
 
-/*
+/**
  * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
  */
 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
 
@@ -568,14 +566,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
     list_add(&entry->tv.head, validated);
 }
 
-/**
- * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
- *
- * @bo: BO which was removed from the LRU
- *
- * Make sure the bulk_moveable flag is updated when a BO is removed from the
- * LRU.
- */
 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 {
     struct amdgpu_bo *abo;
 
@@ -702,7 +692,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
  * @bo: BO to clear
- * @direct: use a direct update
  *
  * Root PD needs to be reserved when calling this.
  *
 
@@ -711,8 +700,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                   struct amdgpu_vm *vm,
-                  struct amdgpu_bo *bo,
-                  bool direct)
+                  struct amdgpu_bo *bo)
 {
     struct ttm_operation_ctx ctx = { true, false };
     unsigned level = adev->vm_manager.root_level;
 
@@ -771,7 +759,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
     memset(&params, 0, sizeof(params));
     params.adev = adev;
     params.vm = vm;
-    params.direct = direct;
 
     r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
     if (r)
 
@@ -825,13 +812,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requesting vm
- * @level: the page table level
- * @direct: use a direct update
  * @bp: resulting BO allocation parameters
  */
 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                   int level, bool direct,
-                   struct amdgpu_bo_param *bp)
+                   int level, struct amdgpu_bo_param *bp)
 {
     memset(bp, 0, sizeof(*bp));
 
 
@@ -846,7 +830,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
     else if (!vm->root.base.bo || vm->root.base.bo->shadow)
         bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
     bp->type = ttm_bo_type_kernel;
-    bp->no_wait_gpu = direct;
     if (vm->root.base.bo)
         bp->resv = vm->root.base.bo->tbo.base.resv;
 }
 
@@ -857,7 +840,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @adev: amdgpu_device pointer
  * @vm: VM to allocate page tables for
  * @cursor: Which page table to allocate
- * @direct: use a direct update
  *
  * Make sure a specific page table or directory is allocated.
  *
 
@@ -867,8 +849,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                    struct amdgpu_vm *vm,
-                   struct amdgpu_vm_pt_cursor *cursor,
-                   bool direct)
+                   struct amdgpu_vm_pt_cursor *cursor)
 {
     struct amdgpu_vm_pt *entry = cursor->entry;
     struct amdgpu_bo_param bp;
 
@@ -889,7 +870,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
     if (entry->base.bo)
         return 0;
 
-    amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+    amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
 
     r = amdgpu_bo_create(adev, &bp, &pt);
     if (r)
 
@@ -901,7 +882,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
     pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
     amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 
-    r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+    r = amdgpu_vm_clear_bo(adev, vm, pt);
     if (r)
         goto error_free_pt;
 
 
@@ -1038,8 +1019,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
  * Returns:
  * 0 on success, errno otherwise.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
-            bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
 
@@ -1053,8 +1033,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
         id->oa_base != job->oa_base ||
         id->oa_size != job->oa_size);
     bool vm_flush_needed = job->vm_needs_flush;
+    bool pasid_mapping_needed = id->pasid != job->pasid ||
+        !id->pasid_mapping ||
+        !dma_fence_is_signaled(id->pasid_mapping);
     struct dma_fence *fence = NULL;
-    bool pasid_mapping_needed = false;
     unsigned patch_offset = 0;
     int r;
 
 
@@ -1064,12 +1046,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
         pasid_mapping_needed = true;
     }
 
-    mutex_lock(&id_mgr->lock);
-    if (id->pasid != job->pasid || !id->pasid_mapping ||
-        !dma_fence_is_signaled(id->pasid_mapping))
-        pasid_mapping_needed = true;
-    mutex_unlock(&id_mgr->lock);
-
     gds_switch_needed &= !!ring->funcs->emit_gds_switch;
     vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
             job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
 
@@ -1109,11 +1085,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
     }
 
     if (pasid_mapping_needed) {
-        mutex_lock(&id_mgr->lock);
         id->pasid = job->pasid;
         dma_fence_put(id->pasid_mapping);
         id->pasid_mapping = dma_fence_get(fence);
-        mutex_unlock(&id_mgr->lock);
     }
     dma_fence_put(fence);
 
|
@ -1197,10 +1171,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* amdgpu_vm_update_pde - update a single level in the hierarchy
|
* amdgpu_vm_update_pde - update a single level in the hierarchy
|
||||||
*
|
*
|
||||||
* @params: parameters for the update
|
* @param: parameters for the update
|
||||||
* @vm: requested vm
|
* @vm: requested vm
|
||||||
* @entry: entry to update
|
* @entry: entry to update
|
||||||
*
|
*
|
||||||
|
@ -1224,7 +1198,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
|
||||||
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
|
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
|
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
|
||||||
*
|
*
|
||||||
* @adev: amdgpu_device pointer
|
* @adev: amdgpu_device pointer
|
||||||
|
@ -1243,20 +1217,19 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
|
||||||
amdgpu_vm_bo_relocated(&entry->base);
|
amdgpu_vm_bo_relocated(&entry->base);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* amdgpu_vm_update_pdes - make sure that all directories are valid
|
* amdgpu_vm_update_directories - make sure that all directories are valid
|
||||||
*
|
*
|
||||||
* @adev: amdgpu_device pointer
|
* @adev: amdgpu_device pointer
|
||||||
* @vm: requested vm
|
* @vm: requested vm
|
||||||
* @direct: submit directly to the paging queue
|
|
||||||
*
|
*
|
||||||
* Makes sure all directories are up to date.
|
* Makes sure all directories are up to date.
|
||||||
*
|
*
|
||||||
* Returns:
|
* Returns:
|
||||||
* 0 for success, error for failure.
|
* 0 for success, error for failure.
|
||||||
*/
|
*/
|
||||||
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
|
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
|
||||||
struct amdgpu_vm *vm, bool direct)
|
struct amdgpu_vm *vm)
|
||||||
{
|
{
|
||||||
struct amdgpu_vm_update_params params;
|
struct amdgpu_vm_update_params params;
|
||||||
int r;
|
int r;
|
||||||
|
@ -1267,7 +1240,6 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
|
||||||
memset(¶ms, 0, sizeof(params));
|
memset(¶ms, 0, sizeof(params));
|
||||||
params.adev = adev;
|
params.adev = adev;
|
||||||
params.vm = vm;
|
params.vm = vm;
|
||||||
params.direct = direct;
|
|
||||||
|
|
||||||
r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_VM, NULL);
|
r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_VM, NULL);
|
||||||
if (r)
|
if (r)
|
||||||
|
@ -1295,7 +1267,7 @@ error:
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* amdgpu_vm_update_flags - figure out flags for PTE updates
|
* amdgpu_vm_update_flags - figure out flags for PTE updates
|
||||||
*
|
*
|
||||||
* Make sure to set the right flags for the PTEs at the desired level.
|
* Make sure to set the right flags for the PTEs at the desired level.
|
||||||
|
@ -1418,8 +1390,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
|
||||||
uint64_t incr, entry_end, pe_start;
|
uint64_t incr, entry_end, pe_start;
|
||||||
struct amdgpu_bo *pt;
|
struct amdgpu_bo *pt;
|
||||||
|
|
||||||
r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
|
r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
|
||||||
params->direct);
|
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
|
@ -1510,14 +1481,13 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
|
||||||
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
|
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
|
||||||
*
|
*
|
||||||
* @adev: amdgpu_device pointer
|
* @adev: amdgpu_device pointer
|
||||||
* @vm: requested vm
|
|
||||||
* @direct: direct submission in a page fault
|
|
||||||
* @exclusive: fence we need to sync to
|
* @exclusive: fence we need to sync to
|
||||||
|
* @pages_addr: DMA addresses to use for mapping
|
||||||
|
* @vm: requested vm
|
||||||
* @start: start of mapped range
|
* @start: start of mapped range
|
||||||
* @last: last mapped entry
|
* @last: last mapped entry
|
||||||
* @flags: flags for the entries
|
* @flags: flags for the entries
|
||||||
* @addr: addr to set the area to
|
* @addr: addr to set the area to
|
||||||
* @pages_addr: DMA addresses to use for mapping
|
|
||||||
* @fence: optional resulting fence
|
* @fence: optional resulting fence
|
||||||
*
|
*
|
||||||
* Fill in the page table entries between @start and @last.
|
* Fill in the page table entries between @start and @last.
|
||||||
|
@ -1526,11 +1496,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
|
||||||
* 0 for success, -EINVAL for failure.
|
* 0 for success, -EINVAL for failure.
|
||||||
*/
|
*/
|
||||||
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
||||||
struct amdgpu_vm *vm, bool direct,
|
|
||||||
struct dma_fence *exclusive,
|
struct dma_fence *exclusive,
|
||||||
|
dma_addr_t *pages_addr,
|
||||||
|
struct amdgpu_vm *vm,
|
||||||
uint64_t start, uint64_t last,
|
uint64_t start, uint64_t last,
|
||||||
uint64_t flags, uint64_t addr,
|
uint64_t flags, uint64_t addr,
|
||||||
dma_addr_t *pages_addr,
|
|
||||||
struct dma_fence **fence)
|
struct dma_fence **fence)
|
||||||
{
|
{
|
||||||
struct amdgpu_vm_update_params params;
|
struct amdgpu_vm_update_params params;
|
||||||
|
@ -1540,7 +1510,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
||||||
memset(¶ms, 0, sizeof(params));
|
memset(¶ms, 0, sizeof(params));
|
||||||
params.adev = adev;
|
params.adev = adev;
|
||||||
params.vm = vm;
|
params.vm = vm;
|
||||||
params.direct = direct;
|
|
||||||
params.pages_addr = pages_addr;
|
params.pages_addr = pages_addr;
|
||||||
|
|
||||||
/* sync to everything except eviction fences on unmapping */
|
/* sync to everything except eviction fences on unmapping */
|
||||||
|
@ -1599,8 +1568,27 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
||||||
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
|
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
|
||||||
flags &= ~AMDGPU_PTE_WRITEABLE;
|
flags &= ~AMDGPU_PTE_WRITEABLE;
|
||||||
|
|
||||||
/* Apply ASIC specific mapping flags */
|
flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
|
flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
||||||
|
|
||||||
|
if (adev->asic_type >= CHIP_NAVI10) {
|
||||||
|
flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
|
||||||
|
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
|
||||||
|
} else {
|
||||||
|
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
|
||||||
|
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((mapping->flags & AMDGPU_PTE_PRT) &&
|
||||||
|
(adev->asic_type >= CHIP_VEGA10)) {
|
||||||
|
flags |= AMDGPU_PTE_PRT;
|
||||||
|
if (adev->asic_type >= CHIP_NAVI10) {
|
||||||
|
flags |= AMDGPU_PTE_SNOOPED;
|
||||||
|
flags |= AMDGPU_PTE_LOG;
|
||||||
|
flags |= AMDGPU_PTE_SYSTEM;
|
||||||
|
}
|
||||||
|
flags &= ~AMDGPU_PTE_VALID;
|
||||||
|
}
|
||||||
|
|
||||||
trace_amdgpu_vm_bo_update(mapping);
|
trace_amdgpu_vm_bo_update(mapping);
|
||||||
|
|
||||||
|
@ -1644,8 +1632,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
||||||
dma_addr = pages_addr;
|
dma_addr = pages_addr;
|
||||||
} else {
|
} else {
|
||||||
addr = pages_addr[pfn];
|
addr = pages_addr[pfn];
|
||||||
max_entries = count *
|
max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
||||||
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if (flags & AMDGPU_PTE_VALID) {
|
} else if (flags & AMDGPU_PTE_VALID) {
|
||||||
|
@ -1654,9 +1641,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
||||||
}
|
}
|
||||||
|
|
||||||
last = min((uint64_t)mapping->last, start + max_entries - 1);
|
last = min((uint64_t)mapping->last, start + max_entries - 1);
|
||||||
r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
|
r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
|
||||||
start, last, flags, addr,
|
start, last, flags, addr,
|
||||||
dma_addr, fence);
|
fence);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
|
@ -1684,7 +1671,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
||||||
* Returns:
|
* Returns:
|
||||||
* 0 for success, -EINVAL for failure.
|
* 0 for success, -EINVAL for failure.
|
||||||
*/
|
*/
|
||||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va *bo_va,
|
||||||
bool clear)
|
bool clear)
|
||||||
{
|
{
|
||||||
struct amdgpu_bo *bo = bo_va->base.bo;
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
||||||
|
@ -1711,7 +1699,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
||||||
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
|
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
|
||||||
pages_addr = ttm->dma_address;
|
pages_addr = ttm->dma_address;
|
||||||
}
|
}
|
||||||
exclusive = bo->tbo.moving;
|
exclusive = dma_resv_get_excl(bo->tbo.base.resv);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (bo) {
|
if (bo) {
|
||||||
|
@ -1742,6 +1730,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (vm->use_cpu_for_update) {
|
||||||
|
/* Flush HDP */
|
||||||
|
mb();
|
||||||
|
amdgpu_asic_flush_hdp(adev, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
/* If the BO is not in its preferred location add it back to
|
/* If the BO is not in its preferred location add it back to
|
||||||
* the evicted list so that it gets validated again on the
|
* the evicted list so that it gets validated again on the
|
||||||
* next command submission.
|
* next command submission.
|
||||||
|
@ -1749,8 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
||||||
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
|
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
|
||||||
uint32_t mem_type = bo->tbo.mem.mem_type;
|
uint32_t mem_type = bo->tbo.mem.mem_type;
|
||||||
|
|
||||||
if (!(bo->preferred_domains &
|
if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
|
||||||
amdgpu_mem_type_to_domain(mem_type)))
|
|
||||||
amdgpu_vm_bo_evicted(&bo_va->base);
|
amdgpu_vm_bo_evicted(&bo_va->base);
|
||||||
else
|
else
|
||||||
amdgpu_vm_bo_idle(&bo_va->base);
|
amdgpu_vm_bo_idle(&bo_va->base);
|
||||||
|
@ -1944,9 +1937,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||||
mapping->start < AMDGPU_GMC_HOLE_START)
|
mapping->start < AMDGPU_GMC_HOLE_START)
|
||||||
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
|
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
|
||||||
|
|
||||||
r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
|
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
|
||||||
mapping->start, mapping->last,
|
mapping->start, mapping->last,
|
||||||
init_pte_value, 0, NULL, &f);
|
init_pte_value, 0, &f);
|
||||||
amdgpu_vm_free_mapping(adev, vm, mapping, f);
|
amdgpu_vm_free_mapping(adev, vm, mapping, f);
|
||||||
if (r) {
|
if (r) {
|
||||||
dma_fence_put(f);
|
dma_fence_put(f);
|
||||||
|
@ -2688,17 +2681,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
spin_lock_init(&vm->invalidated_lock);
|
spin_lock_init(&vm->invalidated_lock);
|
||||||
INIT_LIST_HEAD(&vm->freed);
|
INIT_LIST_HEAD(&vm->freed);
|
||||||
|
|
||||||
/* create scheduler entities for page table updates */
|
/* create scheduler entity for page table updates */
|
||||||
r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
|
r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
|
||||||
adev->vm_manager.vm_pte_num_rqs, NULL);
|
adev->vm_manager.vm_pte_num_rqs, NULL);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
|
|
||||||
adev->vm_manager.vm_pte_num_rqs, NULL);
|
|
||||||
if (r)
|
|
||||||
goto error_free_direct;
|
|
||||||
|
|
||||||
vm->pte_support_ats = false;
|
vm->pte_support_ats = false;
|
||||||
|
|
||||||
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
|
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
|
||||||
|
@ -2713,8 +2701,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
}
|
}
|
||||||
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
||||||
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
||||||
WARN_ONCE((vm->use_cpu_for_update &&
|
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
||||||
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
|
||||||
"CPU update of VM recommended only for large BAR system\n");
|
"CPU update of VM recommended only for large BAR system\n");
|
||||||
|
|
||||||
if (vm->use_cpu_for_update)
|
if (vm->use_cpu_for_update)
|
||||||
|
@ -2723,12 +2710,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
vm->update_funcs = &amdgpu_vm_sdma_funcs;
|
vm->update_funcs = &amdgpu_vm_sdma_funcs;
|
||||||
vm->last_update = NULL;
|
vm->last_update = NULL;
|
||||||
|
|
||||||
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
|
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
|
||||||
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
|
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
|
||||||
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
|
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
|
||||||
r = amdgpu_bo_create(adev, &bp, &root);
|
r = amdgpu_bo_create(adev, &bp, &root);
|
||||||
if (r)
|
if (r)
|
||||||
goto error_free_delayed;
|
goto error_free_sched_entity;
|
||||||
|
|
||||||
r = amdgpu_bo_reserve(root, true);
|
r = amdgpu_bo_reserve(root, true);
|
||||||
if (r)
|
if (r)
|
||||||
|
@ -2740,7 +2727,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
|
|
||||||
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
|
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
|
||||||
|
|
||||||
r = amdgpu_vm_clear_bo(adev, vm, root, false);
|
r = amdgpu_vm_clear_bo(adev, vm, root);
|
||||||
if (r)
|
if (r)
|
||||||
goto error_unreserve;
|
goto error_unreserve;
|
||||||
|
|
||||||
|
@ -2771,11 +2758,8 @@ error_free_root:
|
||||||
amdgpu_bo_unref(&vm->root.base.bo);
|
amdgpu_bo_unref(&vm->root.base.bo);
|
||||||
vm->root.base.bo = NULL;
|
vm->root.base.bo = NULL;
|
||||||
|
|
||||||
error_free_delayed:
|
error_free_sched_entity:
|
||||||
drm_sched_entity_destroy(&vm->delayed);
|
drm_sched_entity_destroy(&vm->entity);
|
||||||
|
|
||||||
error_free_direct:
|
|
||||||
drm_sched_entity_destroy(&vm->direct);
|
|
||||||
|
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
@ -2816,7 +2800,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
||||||
*
|
*
|
||||||
* @adev: amdgpu_device pointer
|
* @adev: amdgpu_device pointer
|
||||||
* @vm: requested vm
|
* @vm: requested vm
|
||||||
* @pasid: pasid to use
|
|
||||||
*
|
*
|
||||||
* This only works on GFX VMs that don't have any BOs added and no
|
* This only works on GFX VMs that don't have any BOs added and no
|
||||||
* page tables allocated yet.
|
* page tables allocated yet.
|
||||||
|
@ -2832,8 +2815,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
||||||
* Returns:
|
* Returns:
|
||||||
* 0 for success, -errno for errors.
|
* 0 for success, -errno for errors.
|
||||||
*/
|
*/
|
||||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
|
||||||
unsigned int pasid)
|
|
||||||
{
|
{
|
||||||
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
|
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
|
||||||
int r;
|
int r;
|
||||||
|
@ -2865,7 +2847,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
*/
|
*/
|
||||||
if (pte_support_ats != vm->pte_support_ats) {
|
if (pte_support_ats != vm->pte_support_ats) {
|
||||||
vm->pte_support_ats = pte_support_ats;
|
vm->pte_support_ats = pte_support_ats;
|
||||||
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
|
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
|
||||||
if (r)
|
if (r)
|
||||||
goto free_idr;
|
goto free_idr;
|
||||||
}
|
}
|
||||||
|
@ -2875,8 +2857,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||||
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
|
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
|
||||||
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
||||||
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
||||||
WARN_ONCE((vm->use_cpu_for_update &&
|
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
||||||
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
|
||||||
"CPU update of VM recommended only for large BAR system\n");
|
"CPU update of VM recommended only for large BAR system\n");
|
||||||
|
|
||||||
if (vm->use_cpu_for_update)
|
if (vm->use_cpu_for_update)
|
||||||
|
@ -2955,38 +2936,19 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||||
struct amdgpu_bo_va_mapping *mapping, *tmp;
|
struct amdgpu_bo_va_mapping *mapping, *tmp;
|
||||||
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
|
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
|
||||||
struct amdgpu_bo *root;
|
struct amdgpu_bo *root;
|
||||||
int i;
|
int i, r;
|
||||||
|
|
||||||
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
|
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
|
||||||
|
|
||||||
root = amdgpu_bo_ref(vm->root.base.bo);
|
|
||||||
amdgpu_bo_reserve(root, true);
|
|
||||||
if (vm->pasid) {
|
if (vm->pasid) {
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||||
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
||||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||||
vm->pasid = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
|
drm_sched_entity_destroy(&vm->entity);
|
||||||
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
|
|
||||||
amdgpu_vm_prt_fini(adev, vm);
|
|
||||||
prt_fini_needed = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
list_del(&mapping->list);
|
|
||||||
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
|
|
||||||
}
|
|
||||||
|
|
||||||
amdgpu_vm_free_pts(adev, vm, NULL);
|
|
||||||
amdgpu_bo_unreserve(root);
|
|
||||||
amdgpu_bo_unref(&root);
|
|
||||||
WARN_ON(vm->root.base.bo);
|
|
||||||
|
|
||||||
drm_sched_entity_destroy(&vm->direct);
|
|
||||||
drm_sched_entity_destroy(&vm->delayed);
|
|
||||||
|
|
||||||
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
|
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
|
||||||
dev_err(adev->dev, "still active bo inside vm\n");
|
dev_err(adev->dev, "still active bo inside vm\n");
|
||||||
|
@ -2999,7 +2961,26 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||||
list_del(&mapping->list);
|
list_del(&mapping->list);
|
||||||
kfree(mapping);
|
kfree(mapping);
|
||||||
}
|
}
|
||||||
|
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
|
||||||
|
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
|
||||||
|
amdgpu_vm_prt_fini(adev, vm);
|
||||||
|
prt_fini_needed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
list_del(&mapping->list);
|
||||||
|
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
root = amdgpu_bo_ref(vm->root.base.bo);
|
||||||
|
r = amdgpu_bo_reserve(root, true);
|
||||||
|
if (r) {
|
||||||
|
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
|
||||||
|
} else {
|
||||||
|
amdgpu_vm_free_pts(adev, vm, NULL);
|
||||||
|
amdgpu_bo_unreserve(root);
|
||||||
|
}
|
||||||
|
amdgpu_bo_unref(&root);
|
||||||
|
WARN_ON(vm->root.base.bo);
|
||||||
dma_fence_put(vm->last_update);
|
dma_fence_put(vm->last_update);
|
||||||
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
|
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
|
||||||
amdgpu_vmid_free_reserved(adev, vm, i);
|
amdgpu_vmid_free_reserved(adev, vm, i);
|
||||||
|
@ -3083,9 +3064,8 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||||
|
|
||||||
switch (args->in.op) {
|
switch (args->in.op) {
|
||||||
case AMDGPU_VM_OP_RESERVE_VMID:
|
case AMDGPU_VM_OP_RESERVE_VMID:
|
||||||
/* We only have requirement to reserve vmid from gfxhub */
|
/* current, we only have requirement to reserve vmid from gfxhub */
|
||||||
r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
|
r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
|
||||||
AMDGPU_GFXHUB_0);
|
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
break;
|
break;
|
||||||
|
@ -3128,88 +3108,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
|
||||||
*/
|
*/
|
||||||
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
|
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
|
||||||
{
|
{
|
||||||
if (vm->task_info.pid)
|
if (!vm->task_info.pid) {
|
||||||
return;
|
|
||||||
|
|
||||||
vm->task_info.pid = current->pid;
|
vm->task_info.pid = current->pid;
|
||||||
get_task_comm(vm->task_info.task_name, current);
|
get_task_comm(vm->task_info.task_name, current);
|
||||||
|
|
||||||
if (current->group_leader->mm != current->mm)
|
if (current->group_leader->mm == current->mm) {
|
||||||
return;
|
|
||||||
|
|
||||||
vm->task_info.tgid = current->group_leader->pid;
|
vm->task_info.tgid = current->group_leader->pid;
|
||||||
get_task_comm(vm->task_info.process_name, current->group_leader);
|
get_task_comm(vm->task_info.process_name, current->group_leader);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* amdgpu_vm_handle_fault - graceful handling of VM faults.
|
|
||||||
* @adev: amdgpu device pointer
|
|
||||||
* @pasid: PASID of the VM
|
|
||||||
* @addr: Address of the fault
|
|
||||||
*
|
|
||||||
* Try to gracefully handle a VM fault. Return true if the fault was handled and
|
|
||||||
* shouldn't be reported any more.
|
|
||||||
*/
|
|
||||||
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
|
|
||||||
uint64_t addr)
|
|
||||||
{
|
|
||||||
struct amdgpu_bo *root;
|
|
||||||
uint64_t value, flags;
|
|
||||||
struct amdgpu_vm *vm;
|
|
||||||
long r;
|
|
||||||
|
|
||||||
spin_lock(&adev->vm_manager.pasid_lock);
|
|
||||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
|
||||||
if (vm)
|
|
||||||
root = amdgpu_bo_ref(vm->root.base.bo);
|
|
||||||
else
|
|
||||||
root = NULL;
|
|
||||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
|
||||||
|
|
||||||
if (!root)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
r = amdgpu_bo_reserve(root, true);
|
|
||||||
if (r)
|
|
||||||
goto error_unref;
|
|
||||||
|
|
||||||
/* Double check that the VM still exists */
|
|
||||||
spin_lock(&adev->vm_manager.pasid_lock);
|
|
||||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
|
||||||
if (vm && vm->root.base.bo != root)
|
|
||||||
vm = NULL;
|
|
||||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
|
||||||
if (!vm)
|
|
||||||
goto error_unlock;
|
|
||||||
|
|
||||||
addr /= AMDGPU_GPU_PAGE_SIZE;
|
|
||||||
flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
|
|
||||||
AMDGPU_PTE_SYSTEM;
|
|
||||||
|
|
||||||
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
|
|
||||||
/* Redirect the access to the dummy page */
|
|
||||||
value = adev->dummy_page_addr;
|
|
||||||
flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
|
|
||||||
AMDGPU_PTE_WRITEABLE;
|
|
||||||
} else {
|
|
||||||
/* Let the hw retry silently on the PTE */
|
|
||||||
value = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
|
|
||||||
flags, value, NULL, NULL);
|
|
||||||
if (r)
|
|
||||||
goto error_unlock;
|
|
||||||
|
|
||||||
r = amdgpu_vm_update_pdes(adev, vm, true);
|
|
||||||
|
|
||||||
error_unlock:
|
|
||||||
amdgpu_bo_unreserve(root);
|
|
||||||
if (r < 0)
|
|
||||||
DRM_ERROR("Can't handle page fault (%ld)\n", r);
|
|
||||||
|
|
||||||
error_unref:
|
|
||||||
amdgpu_bo_unref(&root);
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
|
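The per-ASIC MTYPE handling restored above is an ordinary clear-then-merge on a masked bit field: wipe the field in the accumulated flags, then OR in the bits the mapping requested. A minimal standalone sketch of the idiom (plain C; the shift and mask values are invented stand-ins, not the kernel's AMDGPU_PTE_MTYPE_* definitions):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the AMDGPU_PTE_MTYPE_*_MASK definitions. */
#define PTE_MTYPE_SHIFT 48
#define PTE_MTYPE_MASK  (7ULL << PTE_MTYPE_SHIFT)

/* Clear the MTYPE field in 'flags', then merge in the MTYPE bits that the
 * mapping requested, the same two-step used for NV10/VG10 above. */
static uint64_t merge_mtype(uint64_t flags, uint64_t mapping_flags)
{
	flags &= ~PTE_MTYPE_MASK;
	flags |= mapping_flags & PTE_MTYPE_MASK;
	return flags;
}

int main(void)
{
	uint64_t flags = 0x1 | (2ULL << PTE_MTYPE_SHIFT); /* valid bit + stale MTYPE */
	uint64_t mapping = 5ULL << PTE_MTYPE_SHIFT;       /* requested MTYPE */

	printf("0x%llx\n", (unsigned long long)merge_mtype(flags, mapping));
	return 0;
}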
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,9 +99,6 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_FAULT_STOP_FIRST	1
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
-/* Reserve 4MB VRAM for page tables */
-#define AMDGPU_VM_RESERVED_VRAM		(4ULL << 20)
-
 /* max number of VMHUB */
 #define AMDGPU_MAX_VMHUBS	3
 #define AMDGPU_GFXHUB_0		0
@@ -201,11 +198,6 @@ struct amdgpu_vm_update_params {
 	 */
 	struct amdgpu_vm *vm;
 
-	/**
-	 * @direct: if changes should be made directly
-	 */
-	bool direct;
-
 	/**
 	 * @pages_addr:
 	 *
@@ -262,9 +254,8 @@ struct amdgpu_vm {
 	struct amdgpu_vm_pt	root;
 	struct dma_fence	*last_update;
 
-	/* Scheduler entities for page table updates */
-	struct drm_sched_entity	direct;
-	struct drm_sched_entity	delayed;
+	/* Scheduler entity for page table updates */
+	struct drm_sched_entity	entity;
 
 	unsigned int		pasid;
 	/* dedicated to vm */
@@ -366,8 +357,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      int (*callback)(void *p, struct amdgpu_bo *bo),
 			      void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm, bool direct);
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
@@ -413,8 +404,6 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 
 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 			     struct amdgpu_task_info *task_info);
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
-			    uint64_t addr);
 
 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,6 +49,13 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
 {
 	int r;
 
+	/* Wait for PT BOs to be idle. PTs share the same resv. object
+	 * as the root PD BO
+	 */
+	r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+	if (unlikely(r))
+		return r;
+
 	/* Wait for any BO move to be completed */
 	if (exclusive) {
 		r = dma_fence_wait(exclusive, true);
@@ -56,14 +63,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
 			return r;
 	}
 
-	/* Don't wait for submissions during page fault */
-	if (p->direct)
-		return 0;
-
-	/* Wait for PT BOs to be idle. PTs share the same resv. object
-	 * as the root PD BO
-	 */
-	return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+	return 0;
 }
 
 /**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
 	pe += (unsigned long)amdgpu_bo_kptr(bo);
 
-	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
 	for (i = 0; i < count; i++) {
 		value = p->pages_addr ?
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,19 +68,17 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	if (r)
 		return r;
 
-	p->num_dw_left = ndw;
-
-	/* Wait for moves to be completed */
 	r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
 	if (r)
 		return r;
 
-	/* Don't wait for any submissions during page fault handling */
-	if (p->direct)
-		return 0;
-
-	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-				owner, false);
+	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+			     owner, false);
+	if (r)
+		return r;
+
+	p->num_dw_left = ndw;
+	return 0;
 }
 
 /**
@@ -97,23 +95,22 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 {
 	struct amdgpu_bo *root = p->vm->root.base.bo;
 	struct amdgpu_ib *ib = p->job->ibs;
-	struct drm_sched_entity *entity;
 	struct amdgpu_ring *ring;
 	struct dma_fence *f;
 	int r;
 
-	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
-	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > p->num_dw_left);
-	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
+	r = amdgpu_job_submit(p->job, &p->vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error;
 
 	amdgpu_bo_fence(root, f, true);
-	if (fence && !p->direct)
+	if (fence)
 		swap(*fence, f);
 	dma_fence_put(f);
 	return 0;
@@ -123,6 +120,7 @@ error:
 	return r;
 }
 
+
 /**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
@@ -143,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
 	src += p->num_dw_left * 4;
 
 	pe += amdgpu_bo_gpu_offset(bo);
-	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+	trace_amdgpu_vm_copy_ptes(pe, src, count);
 
 	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
 }
@@ -170,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
 	struct amdgpu_ib *ib = p->job->ibs;
 
 	pe += amdgpu_bo_gpu_offset(bo);
-	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 	if (count < 3) {
 		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
 				    count, incr);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,9 +23,6 @@
 */
 
 #include "amdgpu.h"
-#include "amdgpu_vm.h"
-#include "amdgpu_atomfirmware.h"
-#include "atom.h"
 
 struct amdgpu_vram_mgr {
 	struct drm_mm mm;
@@ -104,39 +101,6 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
 }
 
-static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
-{
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
-
-	switch (adev->gmc.vram_vendor) {
-	case SAMSUNG:
-		return snprintf(buf, PAGE_SIZE, "samsung\n");
-	case INFINEON:
-		return snprintf(buf, PAGE_SIZE, "infineon\n");
-	case ELPIDA:
-		return snprintf(buf, PAGE_SIZE, "elpida\n");
-	case ETRON:
-		return snprintf(buf, PAGE_SIZE, "etron\n");
-	case NANYA:
-		return snprintf(buf, PAGE_SIZE, "nanya\n");
-	case HYNIX:
-		return snprintf(buf, PAGE_SIZE, "hynix\n");
-	case MOSEL:
-		return snprintf(buf, PAGE_SIZE, "mosel\n");
-	case WINBOND:
-		return snprintf(buf, PAGE_SIZE, "winbond\n");
-	case ESMT:
-		return snprintf(buf, PAGE_SIZE, "esmt\n");
-	case MICRON:
-		return snprintf(buf, PAGE_SIZE, "micron\n");
-	default:
-		return snprintf(buf, PAGE_SIZE, "unknown\n");
-	}
-}
-
 static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
 		   amdgpu_mem_info_vram_total_show, NULL);
 static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -145,8 +109,6 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
 		   amdgpu_mem_info_vram_used_show, NULL);
 static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
 		   amdgpu_mem_info_vis_vram_used_show, NULL);
-static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
-		   amdgpu_mem_info_vram_vendor, NULL);
 
 /**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -192,11 +154,6 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
 		DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
 		return ret;
 	}
-	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
-	if (ret) {
-		DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
-		return ret;
-	}
 
 	return 0;
 }
@@ -223,7 +180,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
 	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
 	device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
 	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
-	device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
 	return 0;
 }
 
@@ -319,7 +275,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	struct drm_mm_node *nodes;
 	enum drm_mm_insert_mode mode;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
-	uint64_t vis_usage = 0, mem_bytes, max_bytes;
+	uint64_t vis_usage = 0, mem_bytes;
 	unsigned i;
 	int r;
 
@@ -327,13 +283,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	if (!lpfn)
 		lpfn = man->size;
 
-	max_bytes = adev->gmc.mc_vram_size;
-	if (tbo->type != ttm_bo_type_kernel)
-		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
-
 	/* bail out quickly if there's likely not enough VRAM for this BO */
 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
-	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
+	if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
 		atomic64_sub(mem_bytes, &mgr->usage);
 		mem->mm_node = NULL;
 		return 0;
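The VRAM admission check above is a common optimistic-accounting pattern: add the request to the running total first, then subtract it back out if the total overshoots the budget. A standalone sketch of the same idiom (C11 atomics; reserve() and the 1 MiB limit are invented for illustration, not amdgpu names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t usage;           /* bytes currently accounted */
static const uint64_t limit = 1 << 20;   /* invented 1 MiB budget */

/* Optimistically reserve 'bytes'; roll back and fail on overshoot,
 * mirroring the atomic64_add_return()/atomic64_sub() pair above. */
static bool reserve(uint64_t bytes)
{
	if (atomic_fetch_add(&usage, bytes) + bytes > limit) {
		atomic_fetch_sub(&usage, bytes);
		return false;
	}
	return true;
}

int main(void)
{
	printf("first:  %s\n", reserve(900 * 1024) ? "ok" : "rejected");
	printf("second: %s\n", reserve(200 * 1024) ? "ok" : "rejected");
	return 0;
}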
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,7 +25,6 @@
 #include "amdgpu.h"
 #include "amdgpu_xgmi.h"
 #include "amdgpu_smu.h"
-#include "amdgpu_ras.h"
 #include "df/df_3_6_offset.h"
 
 static DEFINE_MUTEX(xgmi_mutex);
@@ -438,52 +437,3 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_unlock(&hive->hive_lock);
 	}
 }
-
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
-{
-	int r;
-	struct ras_ih_if ih_info = {
-		.cb = NULL,
-	};
-	struct ras_fs_if fs_info = {
-		.sysfs_name = "xgmi_wafl_err_count",
-		.debugfs_name = "xgmi_wafl_err_inject",
-	};
-
-	if (!adev->gmc.xgmi.supported ||
-	    adev->gmc.xgmi.num_physical_nodes == 0)
-		return 0;
-
-	if (!adev->gmc.xgmi.ras_if) {
-		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-		if (!adev->gmc.xgmi.ras_if)
-			return -ENOMEM;
-		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
-		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-		adev->gmc.xgmi.ras_if->sub_block_index = 0;
-		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
-	}
-	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
-	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
-				 &fs_info, &ih_info);
-	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
-		kfree(adev->gmc.xgmi.ras_if);
-		adev->gmc.xgmi.ras_if = NULL;
-	}
-
-	return r;
-}
-
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
-{
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
-	    adev->gmc.xgmi.ras_if) {
-		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
-		struct ras_ih_if ih_info = {
-			.cb = NULL,
-		};
-
-		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
-		kfree(ras_if);
-	}
-}
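The removed amdgpu_xgmi_ras_late_init() follows a usual lazy-allocate, register, roll-back-on-failure shape: the context is allocated only on first use, and freed again if registration fails so no half-initialized state is left behind. A standalone sketch of that shape with hypothetical names (ras_ctx, register_ras); the real code registers against the amdgpu RAS framework instead:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct ras_common_if. */
struct ras_ctx {
	int block;
	char name[32];
};

/* Pretend registration always fails, to exercise the rollback path. */
static int register_ras(struct ras_ctx *ctx) { (void)ctx; return -1; }

/* Lazily allocate the context, try to register it, and free it again
 * on failure, the same shape as the function removed above. */
static int ras_late_init(struct ras_ctx **ctxp)
{
	int r;

	if (!*ctxp) {
		*ctxp = calloc(1, sizeof(**ctxp));
		if (!*ctxp)
			return -12; /* stands in for -ENOMEM */
		(*ctxp)->block = 1;
		strcpy((*ctxp)->name, "xgmi_wafl");
	}

	r = register_ras(*ctxp);
	if (r) {
		free(*ctxp);
		*ctxp = NULL;
	}
	return r;
}

int main(void)
{
	struct ras_ctx *ctx = NULL;

	printf("init: %d, ctx=%p\n", ras_late_init(&ctx), (void *)ctx);
	return 0;
}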
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -42,8 +42,6 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
 int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
 			       struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
 
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
 					 struct amdgpu_device *bo_adev)
--- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -24,6 +24,7 @@
 #include "soc15.h"
 
 #include "soc15_common.h"
+#include "soc15_hw_ip.h"
 #include "arct_ip_offset.h"
 
 int arct_reg_base_init(struct amdgpu_device *adev)
@@ -51,8 +52,6 @@ int arct_reg_base_init(struct amdgpu_device *adev)
 		adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
 		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
 		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
-		adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
-		adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
 	}
 	return 0;
 }
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -966,25 +966,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
 
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS},
-	{mmGRBM_STATUS2},
-	{mmGRBM_STATUS_SE0},
-	{mmGRBM_STATUS_SE1},
-	{mmGRBM_STATUS_SE2},
-	{mmGRBM_STATUS_SE3},
-	{mmSRBM_STATUS},
-	{mmSRBM_STATUS2},
-	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
-	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
-	{mmCP_STAT},
-	{mmCP_STALLED_STAT1},
-	{mmCP_STALLED_STAT2},
-	{mmCP_STALLED_STAT3},
-	{mmCP_CPF_BUSY_STAT},
-	{mmCP_CPF_STALLED_STAT1},
-	{mmCP_CPF_STATUS},
-	{mmCP_CPC_BUSY_STAT},
-	{mmCP_CPC_STALLED_STAT1},
-	{mmCP_CPC_STATUS},
 	{mmGB_ADDR_CONFIG},
 	{mmMC_ARB_RAMCFG},
 	{mmGB_TILE_MODE0},
@@ -1289,15 +1270,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 }
 
 /**
- * cik_asic_pci_config_reset - soft reset GPU
+ * cik_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
- * Use PCI Config method to reset the GPU.
- *
+ * Look up which blocks are hung and attempt
+ * to reset them.
 * Returns 0 for success.
 */
-static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
+static int cik_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
 
@@ -1313,47 +1294,9 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
 static enum amd_reset_method
 cik_asic_reset_method(struct amdgpu_device *adev)
 {
-	bool baco_reset;
-
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
-		/* disable baco reset until it works */
-		/* smu7_asic_get_baco_capability(adev, &baco_reset); */
-		baco_reset = false;
-		break;
-	default:
-		baco_reset = false;
-		break;
-	}
-
-	if (baco_reset)
-		return AMD_RESET_METHOD_BACO;
-	else
-		return AMD_RESET_METHOD_LEGACY;
+	return AMD_RESET_METHOD_LEGACY;
 }
 
-/**
- * cik_asic_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Look up which blocks are hung and attempt
- * to reset them.
- * Returns 0 for success.
- */
-static int cik_asic_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
-		r = smu7_asic_baco_reset(adev);
-	else
-		r = cik_asic_pci_config_reset(adev);
-
-	return r;
-}
-
 static u32 cik_get_config_memsize(struct amdgpu_device *adev)
 {
 	return RREG32(mmCONFIG_MEMSIZE);
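Before the revert, cik kept reset handling split into a method query plus a dispatcher, which is a reusable pattern even though the BACO path was disabled. A standalone sketch of that query/dispatch split (invented names; the stubs only stand in for smu7_asic_baco_reset() and cik_asic_pci_config_reset()):

#include <stdio.h>

enum reset_method { RESET_LEGACY, RESET_BACO };

/* Stand-ins for the two real reset implementations. */
static int baco_reset(void)       { puts("BACO reset");       return 0; }
static int pci_config_reset(void) { puts("PCI config reset"); return 0; }

/* Mirrors cik_asic_reset_method(): choose a method from device state.
 * Here it is hard-wired, as the removed code effectively was. */
static enum reset_method pick_method(void)
{
	return RESET_LEGACY;
}

/* Mirrors the removed cik_asic_reset(): dispatch on the chosen method. */
static int asic_reset(void)
{
	if (pick_method() == RESET_BACO)
		return baco_reset();
	return pci_config_reset();
}

int main(void)
{
	return asic_reset();
}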
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,7 +31,4 @@ void cik_srbm_select(struct amdgpu_device *adev,
 int cik_set_ip_blocks(struct amdgpu_device *adev);
 
 void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
-
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -330,11 +330,9 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -370,7 +368,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 		amdgpu_irq_get(adev, &adev->hpd_irq,
 			       amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 /**
@@ -385,11 +382,9 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -402,7 +397,6 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, &adev->hpd_irq,
 			       amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1225,12 +1219,10 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						 struct drm_display_mode *mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 tmp;
 	int interlace = 0;
@@ -1238,14 +1230,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1271,12 +1261,10 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 tmp;
 	u8 *sadb = NULL;
@@ -1285,14 +1273,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1327,12 +1313,10 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 
 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	struct cea_sad *sads;
 	int i, sad_count;
@@ -1355,14 +1339,12 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1370,10 +1352,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
-	if (sad_count < 0)
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
-	if (sad_count <= 0)
 		return;
+	}
 	BUG_ON(!sads);
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
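The hunks above trade drm_connector_list_iter back for a bare list walk. The iterator API packages traversal state behind begin/next/end calls so that locking and reference counting have a single home; a toy standalone sketch of just that begin/next/end shape (hypothetical types, no locking, not the DRM API itself):

#include <stdio.h>

/* Toy singly linked list standing in for dev->mode_config.connector_list. */
struct node { int id; struct node *next; };

struct list_iter { struct node *pos; };

/* begin/next/end mirror the drm_connector_list_iter_begin() /
 * drm_for_each_connector_iter() / drm_connector_list_iter_end() shape
 * the revert removes: the iterator owns the traversal state, so the
 * real API can take locks and references in one place. */
static void iter_begin(struct list_iter *it, struct node *head) { it->pos = head; }

static struct node *iter_next(struct list_iter *it)
{
	struct node *n = it->pos;

	if (n)
		it->pos = n->next;
	return n;
}

static void iter_end(struct list_iter *it) { it->pos = NULL; }

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct list_iter it;
	struct node *n;

	iter_begin(&it, &a);
	while ((n = iter_next(&it)))
		printf("connector %d\n", n->id);
	iter_end(&it);
	return 0;
}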
@@ -348,11 +348,9 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -387,7 +385,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 /**
@@ -402,11 +399,9 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -418,7 +413,6 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1251,12 +1245,10 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						 struct drm_display_mode *mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 tmp;
 	int interlace = 0;
@@ -1264,14 +1256,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1297,12 +1287,10 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 tmp;
 	u8 *sadb = NULL;
@@ -1311,14 +1299,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1353,12 +1339,10 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 
 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	struct cea_sad *sads;
 	int i, sad_count;
@@ -1381,14 +1365,12 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1396,10 +1378,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
-	if (sad_count < 0)
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
-	if (sad_count <= 0)
 		return;
+	}
 	BUG_ON(!sads);
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {

@@ -281,11 +281,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -311,7 +309,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 /**
@@ -326,11 +324,9 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -342,7 +338,6 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1129,24 +1124,20 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						struct drm_display_mode *mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	int interlace = 0;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1173,25 +1164,21 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u8 *sadb = NULL;
 	int sad_count;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1234,12 +1221,10 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 
 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	struct cea_sad *sads;
 	int i, sad_count;
@@ -1259,14 +1244,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
 	};
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1274,10 +1257,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
-	if (sad_count < 0)
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
-	if (sad_count <= 0)
 		return;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 tmp = 0;
@@ -1649,7 +1632,6 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
 	int bpc = 8;
@@ -1657,14 +1639,12 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 	if (!dig || !dig->afmt)
 		return;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");

@@ -275,11 +275,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -305,7 +303,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 /**
@@ -320,11 +317,9 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	u32 tmp;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -336,7 +331,6 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
-	drm_connector_list_iter_end(&iter);
 }
 
 static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1163,12 +1157,10 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						struct drm_display_mode *mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 tmp = 0, offset;
 
@@ -1177,14 +1169,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 	offset = dig->afmt->pin->offset;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1224,12 +1214,10 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	u32 offset, tmp;
 	u8 *sadb = NULL;
@@ -1240,14 +1228,12 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 
 	offset = dig->afmt->pin->offset;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1277,13 +1263,11 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 
 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 offset;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector = NULL;
 	struct cea_sad *sads;
 	int i, sad_count;
@@ -1308,14 +1292,12 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 
 	offset = dig->afmt->pin->offset;
 
-	drm_connector_list_iter_begin(dev, &iter);
-	drm_for_each_connector_iter(connector, &iter) {
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			amdgpu_connector = to_amdgpu_connector(connector);
 			break;
 		}
 	}
-	drm_connector_list_iter_end(&iter);
 
 	if (!amdgpu_connector) {
 		DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1323,10 +1305,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
-	if (sad_count < 0)
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
-	if (sad_count <= 0)
 		return;
+	}
 	BUG_ON(!sads);
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {

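The SAD error handling restored across the four DCE files above folds the negative-errno and zero cases into one early-return block, so an EDID with no Short Audio Descriptors now also logs before bailing out. A sketch of the restored control flow, assuming a valid edid pointer from amdgpu_connector_edid(); drm_edid_to_sad() returns the parsed descriptor count and allocates the array (which the caller kfree()s), while zero or a negative value means nothing usable was found:

	struct cea_sad *sads;
	int i, sad_count;

	sad_count = drm_edid_to_sad(edid, &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}

	for (i = 0; i < sad_count; i++) {
		/* ... program one AZALIA audio descriptor per SAD ... */
	}
	kfree(sads);
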
@@ -33,10 +33,6 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev)
 {
 }
 
-static void df_v1_7_sw_fini(struct amdgpu_device *adev)
-{
-}
-
 static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
 					  bool enable)
 {
@@ -115,7 +111,6 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
 
 const struct amdgpu_df_funcs df_v1_7_funcs = {
 	.sw_init = df_v1_7_sw_init,
-	.sw_fini = df_v1_7_sw_fini,
 	.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
 	.get_fb_channel_number = df_v1_7_get_fb_channel_number,
 	.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,

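The hunk above removes the DF block's sw_fini callback entirely; df_v3_6 below loses its counterpart too, along with the sysfs counter file it cleaned up. Callbacks in these IP-block function tables are commonly optional, so callers guard for their absence; a sketch of that convention, assuming the adev->df_funcs pointer name used on this branch:

	/* Sketch: invoking an optional DF callback. After this revert
	 * the table has no .sw_fini member, so a guarded call like this
	 * simply never fires. */
	if (adev->df_funcs && adev->df_funcs->sw_fini)
		adev->df_funcs->sw_fini(adev);
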
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
 	unsigned long flags, address, data;
 	uint32_t ficadl_val, ficadh_val;
 
-	address = adev->nbio.funcs->get_pcie_index_offset(adev);
-	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio.funcs->get_pcie_index_offset(adev);
-	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio.funcs->get_pcie_index_offset(adev);
-	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio.funcs->get_pcie_index_offset(adev);
-	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, lo_addr);
@@ -220,13 +220,6 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
 		adev->df_perfmon_config_assign_mask[i] = 0;
 }
 
-static void df_v3_6_sw_fini(struct amdgpu_device *adev)
-{
-
-	device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
-
-}
-
 static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
 					  bool enable)
 {
@@ -544,7 +537,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 
 const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.sw_init = df_v3_6_sw_init,
-	.sw_fini = df_v3_6_sw_fini,
 	.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
 	.get_fb_channel_number = df_v3_6_get_fb_channel_number,
 	.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,

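Every nbio change in this commit is the same mechanical revert: the newer tree groups the NBIO callbacks and shared register tables in an adev->nbio container, while the restored layout keeps a flat adev->nbio_funcs pointer and reaches hdp_flush_reg through the function table itself (visible in the gfx_v10_0 ring_emit_hdp_flush hunk below). Both access shapes, exactly as they appear in the hunks; the surrounding declarations are abbreviated approximations, not full definitions:

	/* Newer (reverted-away-from) container layout: */
	const struct nbio_hdp_flush_reg *reg_new = adev->nbio.hdp_flush_reg;
	adev->nbio.funcs->hdp_flush(adev, NULL);

	/* Older (restored) flat layout, with the register table hanging
	 * off the same function table: */
	const struct nbio_hdp_flush_reg *reg_old = adev->nbio_funcs->hdp_flush_reg;
	adev->nbio_funcs->hdp_flush(adev, NULL);
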
@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
 };
 
 static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -1442,7 +1442,7 @@ static int gfx_v10_0_sw_fini(void *handle)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	amdgpu_gfx_mqd_sw_fini(adev);
-	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v10_0_pfp_fini(adev);
@@ -2443,7 +2443,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2513,7 +2513,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2582,7 +2582,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2903,7 +2903,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3106,7 +3106,6 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 		memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
 		/* reset the ring */
 		ring->wptr = 0;
-		adev->wb.wb[ring->wptr_offs] = 0;
 		amdgpu_ring_clear_ring(ring);
 #ifdef BRING_UP_DEBUG
 		mutex_lock(&adev->srbm_mutex);
@@ -4358,7 +4357,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
@@ -4378,8 +4377,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	}
 
 	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
-			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
 			       ref_and_mask, ref_and_mask, 0x20);
 }
 
@@ -5284,12 +5283,15 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
 
 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
 {
-	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
-			    adev->gfx.config.max_sh_per_se *
-			    adev->gfx.config.max_shader_engines;
-
-	adev->gds.gds_size = 0x10000;
-	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
+	/* init asic gds info */
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+	default:
+		adev->gds.gds_size = 0x10000;
+		adev->gds.gds_compute_max_wave_id = 0x4ff;
+		break;
+	}
 
 	adev->gds.gws_size = 64;
 	adev->gds.oa_size = 16;
 }

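The set_gds_init hunk above replaces a topology-derived limit with a per-ASIC constant. The two agree for Navi10 under its usual configuration; a quick check of the arithmetic, with the CU counts assumed from that part (10 CUs per shader array, 2 arrays per engine, 2 engines):

	unsigned int total_cu = 10 * 2 * 2;            /* 40 CUs on Navi10 */
	unsigned int max_wave_id = total_cu * 32 - 1;  /* 1279 == 0x4ff */

So the hard-coded 0x4ff matches the computed value on Navi10 and simply stops scaling for other CU configurations.
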
@@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	amdgpu_gfx_mqd_sw_fini(adev);
-	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v8_0_mec_fini(adev);

(One file's diff was suppressed because it is too large.)

@@ -178,8 +178,6 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
 }
 

@@ -46,25 +46,21 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
 	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
 }
 
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				  uint64_t page_table_base)
+static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
-	/* two registers distance between mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */
-	int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
-			- mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
-
-	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-			    offset * vmid, lower_32_bits(page_table_base));
-
-	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-			    offset * vmid, upper_32_bits(page_table_base));
+	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+		     lower_32_bits(value));
+
+	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+		     upper_32_bits(value));
 }
 
 static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
-	gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
+	gfxhub_v2_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
@@ -155,15 +151,6 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
 
 	tmp = mmGCVM_L2_CNTL3_DEFAULT;
-	if (adev->gmc.translate_further) {
-		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
-		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
-				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
-	} else {
-		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
-		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
-				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
-	}
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
 
 	tmp = mmGCVM_L2_CNTL4_DEFAULT;
@@ -179,8 +166,6 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
 }
 

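The gfxhub_v2_0 hunk above drops the generalized setup_vm_pt_regs() in favor of the older GART-only helper. The removed function relied on the fixed register stride between VM contexts: context N's base-address register sits at the context-0 register plus N times the CONTEXT1/CONTEXT0 distance. A sketch of that addressing trick, restating the removed code with its own register names:

	/* Program the page-table base for an arbitrary VMID by scaling
	 * the per-context register stride. */
	u32 stride = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		     mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    stride * vmid, lower_32_bits(page_table_base));
	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    stride * vmid, upper_32_bits(page_table_base));

The restored init_gart_pt_regs() only ever writes context 0, which is all the GART needs.
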
@@ -31,7 +31,5 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value);
 void gfxhub_v2_0_init(struct amdgpu_device *adev);
 u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				  uint64_t page_table_base);
 
 #endif

@ -278,7 +278,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
/* flush hdp cache */
|
/* flush hdp cache */
|
||||||
adev->nbio.funcs->hdp_flush(adev, NULL);
|
adev->nbio_funcs->hdp_flush(adev, NULL);
|
||||||
|
|
||||||
mutex_lock(&adev->mman.gtt_window_lock);
|
mutex_lock(&adev->mman.gtt_window_lock);
|
||||||
|
|
||||||
|
@ -309,7 +309,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
|
||||||
|
|
||||||
job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
|
job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
|
||||||
job->vm_needs_flush = true;
|
job->vm_needs_flush = true;
|
||||||
job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
|
|
||||||
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
|
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
|
||||||
r = amdgpu_job_submit(job, &adev->mman.entity,
|
r = amdgpu_job_submit(job, &adev->mman.entity,
|
||||||
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
|
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
|
||||||
|
@ -398,23 +397,43 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
|
||||||
* 1 system
|
* 1 system
|
||||||
* 0 valid
|
* 0 valid
|
||||||
*/
|
*/
|
||||||
|
static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
|
||||||
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
|
uint32_t flags)
|
||||||
{
|
{
|
||||||
switch (flags) {
|
uint64_t pte_flag = 0;
|
||||||
|
|
||||||
|
if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
|
||||||
|
pte_flag |= AMDGPU_PTE_EXECUTABLE;
|
||||||
|
if (flags & AMDGPU_VM_PAGE_READABLE)
|
||||||
|
pte_flag |= AMDGPU_PTE_READABLE;
|
||||||
|
if (flags & AMDGPU_VM_PAGE_WRITEABLE)
|
||||||
|
pte_flag |= AMDGPU_PTE_WRITEABLE;
|
||||||
|
|
||||||
|
switch (flags & AMDGPU_VM_MTYPE_MASK) {
|
||||||
case AMDGPU_VM_MTYPE_DEFAULT:
|
case AMDGPU_VM_MTYPE_DEFAULT:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
||||||
|
break;
|
||||||
case AMDGPU_VM_MTYPE_NC:
|
case AMDGPU_VM_MTYPE_NC:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
||||||
|
break;
|
||||||
case AMDGPU_VM_MTYPE_WC:
|
case AMDGPU_VM_MTYPE_WC:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
|
||||||
|
break;
|
||||||
case AMDGPU_VM_MTYPE_CC:
|
case AMDGPU_VM_MTYPE_CC:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
|
||||||
|
break;
|
||||||
case AMDGPU_VM_MTYPE_UC:
|
case AMDGPU_VM_MTYPE_UC:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (flags & AMDGPU_VM_PAGE_PRT)
|
||||||
|
pte_flag |= AMDGPU_PTE_PRT;
|
||||||
|
|
||||||
|
return pte_flag;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
||||||
|
@ -441,32 +460,12 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
-				 struct amdgpu_bo_va_mapping *mapping,
-				 uint64_t *flags)
-{
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
-	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
-	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
-
-	if (mapping->flags & AMDGPU_PTE_PRT) {
-		*flags |= AMDGPU_PTE_PRT;
-		*flags |= AMDGPU_PTE_SNOOPED;
-		*flags |= AMDGPU_PTE_LOG;
-		*flags |= AMDGPU_PTE_SYSTEM;
-		*flags &= ~AMDGPU_PTE_VALID;
-	}
-}
-
 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
-	.map_mtype = gmc_v10_0_map_mtype,
-	.get_vm_pde = gmc_v10_0_get_vm_pde,
-	.get_vm_pte = gmc_v10_0_get_vm_pte
+	.get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
+	.get_vm_pde = gmc_v10_0_get_vm_pde
 };
 
 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -520,6 +519,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 {
 	u64 base = 0;
 
+	if (!amdgpu_sriov_vf(adev))
 		base = gfxhub_v2_0_get_fb_location(adev);
 
 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
@@ -540,13 +540,24 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 {
+	int chansize, numchan;
+
+	if (!amdgpu_emu_mode)
+		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+	else {
+		/* hard code vram_width for emulation */
+		chansize = 128;
+		numchan = 1;
+		adev->gmc.vram_width = numchan * chansize;
+	}
+
 	/* Could aper size report 0 ? */
 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 	/* size in MB on si */
 	adev->gmc.mc_vram_size =
-		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
@@ -625,7 +636,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 
 static int gmc_v10_0_sw_init(void *handle)
 {
-	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
+	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfxhub_v2_0_init(adev);
@@ -633,15 +644,7 @@ static int gmc_v10_0_sw_init(void *handle)
 
 	spin_lock_init(&adev->gmc.invalidate_lock);
 
-	r = amdgpu_atomfirmware_get_vram_info(adev,
-					      &vram_width, &vram_type, &vram_vendor);
-	if (!amdgpu_emu_mode)
-		adev->gmc.vram_width = vram_width;
-	else
-		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
-
-	adev->gmc.vram_type = vram_type;
-	adev->gmc.vram_vendor = vram_vendor;
+	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
@@ -791,7 +794,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
 	/* Flush HDP after it is initialized */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
 		false : true;
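A note on the pattern in the hunks above and below: before the revert, each GMC generation exposed get_vm_pte(), which edits a PTE flag word in place from an existing amdgpu_bo_va_mapping; the restored get_vm_pte_flags() instead builds the PTE bits from scratch out of the userspace-visible AMDGPU_VM_PAGE_* request flags. The following is a minimal, standalone sketch of that translation shape only; the bit positions here are invented for illustration, and the real AMDGPU_VM_PAGE_*/AMDGPU_PTE_* definitions live in the amdgpu headers.

#include <stdint.h>
#include <stdio.h>

/* Mock flag bits for illustration only; not the real amdgpu values. */
#define VM_PAGE_READABLE   (1u << 0)
#define VM_PAGE_WRITEABLE  (1u << 1)
#define VM_PAGE_PRT        (1u << 2)

#define PTE_READABLE   (1ull << 5)
#define PTE_WRITEABLE  (1ull << 6)
#define PTE_PRT        (1ull << 51)

/* Same shape as the reverted gmc_v6/v7 get_vm_pte_flags(): build a PTE
 * flag word from scratch out of the requested mapping permissions. */
static uint64_t get_vm_pte_flags(uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & VM_PAGE_READABLE)
		pte_flag |= PTE_READABLE;
	if (flags & VM_PAGE_WRITEABLE)
		pte_flag |= PTE_WRITEABLE;
	if (flags & VM_PAGE_PRT)
		pte_flag |= PTE_PRT;

	return pte_flag;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)
	       get_vm_pte_flags(VM_PAGE_READABLE | VM_PAGE_PRT));
	return 0;
}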
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -386,20 +386,27 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	return pd_addr;
 }
 
+static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
+					  uint32_t flags)
+{
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	return pte_flag;
+}
+
 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
 	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
-static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
-				struct amdgpu_bo_va_mapping *mapping,
-				uint64_t *flags)
-{
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags &= ~AMDGPU_PTE_PRT;
-}
-
 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 					      bool value)
 {
@@ -1146,7 +1153,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
-	.get_vm_pte = gmc_v6_0_get_vm_pte,
+	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -463,20 +463,27 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 }
 
+static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
+					  uint32_t flags)
+{
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	return pte_flag;
+}
+
 static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
 	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
-static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
-				struct amdgpu_bo_va_mapping *mapping,
-				uint64_t *flags)
-{
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags &= ~AMDGPU_PTE_PRT;
-}
-
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1336,8 +1343,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
 	.set_prt = gmc_v7_0_set_prt,
-	.get_vm_pde = gmc_v7_0_get_vm_pde,
-	.get_vm_pte = gmc_v7_0_get_vm_pte
+	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
+	.get_vm_pde = gmc_v7_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -686,21 +686,29 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
  * 0 valid
  */
 
+static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
+					  uint32_t flags)
+{
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		pte_flag |= AMDGPU_PTE_EXECUTABLE;
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	return pte_flag;
+}
+
 static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
 	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
-static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
-				struct amdgpu_bo_va_mapping *mapping,
-				uint64_t *flags)
-{
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-	*flags &= ~AMDGPU_PTE_PRT;
-}
-
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1703,8 +1711,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
 	.set_prt = gmc_v8_0_set_prt,
-	.get_vm_pde = gmc_v8_0_get_vm_pde,
-	.get_vm_pte = gmc_v8_0_get_vm_pte
+	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
+	.get_vm_pde = gmc_v8_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
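All of the funcs tables touched above follow the same amdgpu idiom: a per-IP-version const struct of function pointers, assigned once during init and dispatched through thereafter. A compilable toy version of that wiring is below; every name in it is invented for illustration, and the real structures are the amdgpu_gmc_funcs family.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the amdgpu types; illustration only. */
struct toy_device;

struct toy_gmc_funcs {
	uint64_t (*get_vm_pte_flags)(struct toy_device *dev, uint32_t flags);
};

struct toy_device {
	const struct toy_gmc_funcs *gmc_funcs;
};

static uint64_t v6_get_vm_pte_flags(struct toy_device *dev, uint32_t flags)
{
	(void)dev;
	return flags; /* identity translation, just for the demo */
}

/* One const ops table per IP version, like gmc_v6_0_gmc_funcs. */
static const struct toy_gmc_funcs v6_funcs = {
	.get_vm_pte_flags = v6_get_vm_pte_flags,
};

static void set_gmc_funcs(struct toy_device *dev)
{
	dev->gmc_funcs = &v6_funcs; /* amdgpu switches on ASIC type here */
}

int main(void)
{
	struct toy_device dev;

	set_gmc_funcs(&dev);
	printf("%llu\n", (unsigned long long)
	       dev.gmc_funcs->get_vm_pte_flags(&dev, 3));
	return 0;
}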
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -51,12 +51,10 @@
 #include "gfxhub_v1_1.h"
 #include "mmhub_v9_4.h"
 #include "umc_v6_1.h"
-#include "umc_v6_0.h"
 
 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 
 #include "amdgpu_ras.h"
-#include "amdgpu_xgmi.h"
 
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -245,6 +243,44 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+		struct ras_err_data *err_data,
+		struct amdgpu_iv_entry *entry)
+{
+	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+	if (adev->umc.funcs->query_ras_error_count)
+		adev->umc.funcs->query_ras_error_count(adev, err_data);
+	/* umc query_ras_error_address is also responsible for clearing
+	 * error status
+	 */
+	if (adev->umc.funcs->query_ras_error_address)
+		adev->umc.funcs->query_ras_error_address(adev, err_data);
+
+	/* only uncorrectable error needs gpu reset */
+	if (err_data->ue_count)
+		amdgpu_ras_reset_gpu(adev, 0);
+
+	return AMDGPU_RAS_SUCCESS;
+}
+
+static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
+		struct amdgpu_irq_src *source,
+		struct amdgpu_iv_entry *entry)
+{
+	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
+	struct ras_dispatch_if ih_data = {
+		.entry = entry,
+	};
+
+	if (!ras_if)
+		return 0;
+
+	ih_data.head = *ras_if;
+
+	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+	return 0;
+}
+
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *src,
 					unsigned type,
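The restored gmc_v9_0_process_ras_data_cb above shows the usual RAS decision flow: query error counts through optional per-UMC hooks, then request a GPU reset only when uncorrectable errors were seen. A hedged, self-contained miniature of that flow follows; every type and function in it is a mock, standing in for struct ras_err_data and the adev->umc.funcs hook table.

#include <stdio.h>

struct err_data {
	unsigned long ue_count; /* uncorrectable errors */
	unsigned long ce_count; /* correctable errors */
};

struct umc_funcs {
	void (*query_ras_error_count)(struct err_data *data);
};

static void mock_query(struct err_data *data)
{
	data->ce_count = 2;
	data->ue_count = 1;
}

static const struct umc_funcs umc = { .query_ras_error_count = mock_query };

static int process_ras_data(const struct umc_funcs *funcs,
			    struct err_data *data)
{
	/* Hooks are optional, so check before calling, as the driver does. */
	if (funcs->query_ras_error_count)
		funcs->query_ras_error_count(data);

	/* Only uncorrectable errors warrant a GPU reset. */
	if (data->ue_count)
		printf("would request gpu reset (%lu UE)\n", data->ue_count);

	return 0;
}

int main(void)
{
	struct err_data data = { 0, 0 };

	return process_ras_data(&umc, &data);
}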
@@ -319,10 +355,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 	}
 
 	/* If it's the first fault for this address, process it normally */
-	if (retry_fault && !in_interrupt() &&
-	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
-		return 1; /* This also prevents sending it to KFD */
-
 	if (!amdgpu_sriov_vf(adev)) {
 		/*
 		 * Issue a dummy read to wait for the status register to
@@ -385,7 +417,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 
 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
 	.set = gmc_v9_0_ecc_interrupt_state,
-	.process = amdgpu_umc_process_ecc_irq,
+	.process = gmc_v9_0_process_ecc_irq,
 };
 
 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -552,25 +584,44 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
  * 0 valid
  */
 
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
+					uint32_t flags)
+
 {
-	switch (flags) {
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		pte_flag |= AMDGPU_PTE_EXECUTABLE;
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+
+	switch (flags & AMDGPU_VM_MTYPE_MASK) {
 	case AMDGPU_VM_MTYPE_DEFAULT:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		break;
 	case AMDGPU_VM_MTYPE_NC:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		break;
 	case AMDGPU_VM_MTYPE_WC:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
-	case AMDGPU_VM_MTYPE_RW:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+		break;
 	case AMDGPU_VM_MTYPE_CC:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
+		break;
 	case AMDGPU_VM_MTYPE_UC:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
+		break;
 	default:
-		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
+		break;
 	}
+
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	return pte_flag;
 }
 
 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
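Both sides of the hunk above map a requested memory type onto PTE MTYPE bits; the reverted version first isolates the MTYPE field with flags & AMDGPU_VM_MTYPE_MASK and accumulates into pte_flag instead of returning early from each case. A tiny sketch of that masked-switch idiom, with made-up encodings in place of the real AMDGPU_VM_MTYPE_* values and AMDGPU_PTE_MTYPE_VG10() macro:

#include <stdint.h>
#include <stdio.h>

/* Made-up field encoding for illustration; not the real amdgpu layout. */
#define MTYPE_MASK   0x7u
#define MTYPE_NC     0x1u
#define MTYPE_WC     0x2u
#define MTYPE_UC     0x3u

static uint64_t pte_mtype_bits(uint32_t mtype)
{
	return (uint64_t)mtype << 57; /* pretend MTYPE lives at bit 57 */
}

static uint64_t map_mtype(uint32_t flags)
{
	uint64_t pte_flag = 0;

	/* Mask first so unrelated permission bits cannot alias an MTYPE. */
	switch (flags & MTYPE_MASK) {
	case MTYPE_WC:
		pte_flag |= pte_mtype_bits(MTYPE_WC);
		break;
	case MTYPE_UC:
		pte_flag |= pte_mtype_bits(MTYPE_UC);
		break;
	case MTYPE_NC:
	default:
		pte_flag |= pte_mtype_bits(MTYPE_NC);
		break;
	}
	return pte_flag;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)map_mtype(0x12)); /* WC */
	return 0;
}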
@@ -597,34 +648,12 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 	}
 }
 
-static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
-				struct amdgpu_bo_va_mapping *mapping,
-				uint64_t *flags)
-{
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
-	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
-	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
-
-	if (mapping->flags & AMDGPU_PTE_PRT) {
-		*flags |= AMDGPU_PTE_PRT;
-		*flags &= ~AMDGPU_PTE_VALID;
-	}
-
-	if (adev->asic_type == CHIP_ARCTURUS &&
-	    !(*flags & AMDGPU_PTE_SYSTEM) &&
-	    mapping->bo_va->is_xgmi)
-		*flags |= AMDGPU_PTE_SNOOPED;
-}
-
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
-	.map_mtype = gmc_v9_0_map_mtype,
-	.get_vm_pde = gmc_v9_0_get_vm_pde,
-	.get_vm_pte = gmc_v9_0_get_vm_pte
+	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
+	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -635,9 +664,6 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-		adev->umc.funcs = &umc_v6_0_funcs;
-		break;
 	case CHIP_VEGA20:
 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
@@ -655,7 +681,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-		adev->mmhub.funcs = &mmhub_v1_0_funcs;
+		adev->mmhub_funcs = &mmhub_v1_0_funcs;
 		break;
 	default:
 		break;
@@ -736,10 +762,140 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
+			struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct ras_common_if **ras_if = NULL;
+	struct ras_ih_if ih_info = {
+		.cb = gmc_v9_0_process_ras_data_cb,
+	};
+	int r;
+
+	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
+		ras_if = &adev->gmc.umc_ras_if;
+	else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
+		ras_if = &adev->gmc.mmhub_ras_if;
+	else
+		BUG();
+
+	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+		return 0;
+	}
+
+	/* handle resume path. */
+	if (*ras_if) {
+		/* resend ras TA enable cmd during resume.
+		 * prepare to handle failure.
+		 */
+		ih_info.head = **ras_if;
+		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+		if (r) {
+			if (r == -EAGAIN) {
+				/* request a gpu reset. will run again. */
+				amdgpu_ras_request_reset_on_boot(adev,
+						ras_block->block);
+				return 0;
+			}
+			/* fail to enable ras, cleanup all. */
+			goto irq;
+		}
+		/* enable successfully. continue. */
+		goto resume;
+	}
+
+	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+	if (!*ras_if)
+		return -ENOMEM;
+
+	**ras_if = *ras_block;
+
+	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+	if (r) {
+		if (r == -EAGAIN) {
+			amdgpu_ras_request_reset_on_boot(adev,
+					ras_block->block);
+			r = 0;
+		}
+		goto feature;
+	}
+
+	ih_info.head = **ras_if;
+	fs_info->head = **ras_if;
+
+	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
+		r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+		if (r)
+			goto interrupt;
+	}
+
+	amdgpu_ras_debugfs_create(adev, fs_info);
+
+	r = amdgpu_ras_sysfs_create(adev, fs_info);
+	if (r)
+		goto sysfs;
+resume:
+	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
+		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+		if (r)
+			goto irq;
+	}
+
+	return 0;
+irq:
+	amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+	amdgpu_ras_debugfs_remove(adev, *ras_if);
+	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
+		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+	amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+	kfree(*ras_if);
+	*ras_if = NULL;
+	return r;
+}
+
+static int gmc_v9_0_ecc_late_init(void *handle)
+{
+	int r;
+
+	struct ras_fs_if umc_fs_info = {
+		.sysfs_name = "umc_err_count",
+		.debugfs_name = "umc_err_inject",
+	};
+	struct ras_common_if umc_ras_block = {
+		.block = AMDGPU_RAS_BLOCK__UMC,
+		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+		.sub_block_index = 0,
+		.name = "umc",
+	};
+	struct ras_fs_if mmhub_fs_info = {
+		.sysfs_name = "mmhub_err_count",
+		.debugfs_name = "mmhub_err_inject",
+	};
+	struct ras_common_if mmhub_ras_block = {
+		.block = AMDGPU_RAS_BLOCK__MMHUB,
+		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+		.sub_block_index = 0,
+		.name = "mmhub",
+	};
+
+	r = gmc_v9_0_ecc_ras_block_late_init(handle,
+			&umc_fs_info, &umc_ras_block);
+	if (r)
+		return r;
+
+	r = gmc_v9_0_ecc_ras_block_late_init(handle,
+			&mmhub_fs_info, &mmhub_ras_block);
+	return r;
+}
+
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
+	bool r;
 
 	if (!gmc_v9_0_keep_stolen_memory(adev))
 		amdgpu_bo_late_init(adev);
@@ -773,7 +929,7 @@ static int gmc_v9_0_late_init(void *handle)
 		}
 	}
 
-	r = amdgpu_gmc_ras_late_init(adev);
+	r = gmc_v9_0_ecc_late_init(handle);
 	if (r)
 		return r;
 
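The restored gmc_v9_0_ecc_ras_block_late_init above is a textbook kernel goto-unwind ladder: each setup step that can fail jumps to a label that tears down everything already set up, in reverse order. A compilable miniature of the idiom, with placeholder steps standing in for the interrupt, debugfs/sysfs, and feature-enable stages:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("setup %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* Mirrors the interrupt:/sysfs:/irq: label chain in the restored
 * function: a failure at step N unwinds steps N-1..1 in reverse. */
static int late_init(void)
{
	int r;

	r = step("feature enable", 0);
	if (r)
		goto out;
	r = step("interrupt handler", 0);
	if (r)
		goto undo_feature;
	r = step("sysfs node", 1); /* force a failure for the demo */
	if (r)
		goto undo_interrupt;

	return 0;

undo_interrupt:
	undo("interrupt handler");
undo_feature:
	undo("feature enable");
out:
	return r;
}

int main(void)
{
	return late_init() ? 1 : 0;
}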
@@ -814,11 +970,33 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 {
+	int chansize, numchan;
 	int r;
 
+	if (amdgpu_sriov_vf(adev)) {
+		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
+		 * and DF related registers is not readable, seems hardcord is the
+		 * only way to set the correct vram_width
+		 */
+		adev->gmc.vram_width = 2048;
+	} else if (amdgpu_emu_mode != 1) {
+		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+	}
+
+	if (!adev->gmc.vram_width) {
+		/* hbm memory channel size */
+		if (adev->flags & AMD_IS_APU)
+			chansize = 64;
+		else
+			chansize = 128;
+
+		numchan = adev->df_funcs->get_hbm_channel_number(adev);
+		adev->gmc.vram_width = numchan * chansize;
+	}
+
 	/* size in MB on si */
 	adev->gmc.mc_vram_size =
-		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
 	if (!(adev->flags & AMD_IS_APU)) {
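The restored mc_init derives VRAM bus width as channel count times channel size, falling back to hardcoded values under SR-IOV or emulation. The arithmetic is trivial but worth pinning down; the channel count below is hypothetical, while the 64/128-bit channel sizes come from the hunk itself:

#include <stdio.h>

int main(void)
{
	int chansize = 128; /* dGPU HBM channel width in bits (64 on APUs) */
	int numchan = 16;   /* hypothetical channel count */

	/* 16 x 128 = 2048, matching the SR-IOV hardcode in the hunk. */
	printf("vram_width = %d bits\n", numchan * chansize);
	return 0;
}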
@@ -930,7 +1108,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 
 static int gmc_v9_0_sw_init(void *handle)
 {
-	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
+	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfxhub_v1_0_init(adev);
@@ -941,32 +1119,7 @@ static int gmc_v9_0_sw_init(void *handle)
 
 	spin_lock_init(&adev->gmc.invalidate_lock);
 
-	r = amdgpu_atomfirmware_get_vram_info(adev,
-		&vram_width, &vram_type, &vram_vendor);
-	if (amdgpu_sriov_vf(adev))
-		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
-		 * and DF related registers is not readable, seems hardcord is the
-		 * only way to set the correct vram_width
-		 */
-		adev->gmc.vram_width = 2048;
-	else if (amdgpu_emu_mode != 1)
-		adev->gmc.vram_width = vram_width;
-
-	if (!adev->gmc.vram_width) {
-		int chansize, numchan;
-
-		/* hbm memory channel size */
-		if (adev->flags & AMD_IS_APU)
-			chansize = 64;
-		else
-			chansize = 128;
-
-		numchan = adev->df_funcs->get_hbm_channel_number(adev);
-		adev->gmc.vram_width = numchan * chansize;
-	}
-
-	adev->gmc.vram_type = vram_type;
-	adev->gmc.vram_vendor = vram_vendor;
+	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
 		adev->num_vmhubs = 2;
@@ -1087,7 +1240,33 @@ static int gmc_v9_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	void *stolen_vga_buf;
 
-	amdgpu_gmc_ras_fini(adev);
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+			adev->gmc.umc_ras_if) {
+		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
+		struct ras_ih_if ih_info = {
+			.head = *ras_if,
+		};
+
+		/* remove fs first */
+		amdgpu_ras_debugfs_remove(adev, ras_if);
+		amdgpu_ras_sysfs_remove(adev, ras_if);
+		/* remove the IH */
+		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+		amdgpu_ras_feature_enable(adev, ras_if, 0);
+		kfree(ras_if);
+	}
+
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+			adev->gmc.mmhub_ras_if) {
+		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
+
+		/* remove fs and disable ras feature */
+		amdgpu_ras_debugfs_remove(adev, ras_if);
+		amdgpu_ras_sysfs_remove(adev, ras_if);
+		amdgpu_ras_feature_enable(adev, ras_if, 0);
+		kfree(ras_if);
+	}
+
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 
@@ -1137,7 +1316,13 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  */
 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 {
-	int r;
+	int r, i;
+	bool value;
+	u32 tmp;
+
+	amdgpu_device_program_register_sequence(adev,
+						golden_settings_vega10_hdp,
+						ARRAY_SIZE(golden_settings_vega10_hdp));
 
 	if (adev->gart.bo == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1147,6 +1332,15 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		/* TODO for renoir */
+		mmhub_v1_0_update_power_gating(adev, true);
+		break;
+	default:
+		break;
+	}
+
 	r = gfxhub_v1_0_gart_enable(adev);
 	if (r)
 		return r;
@@ -1158,49 +1352,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->gmc.gart_size >> 20),
-		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
-	adev->gart.ready = true;
-	return 0;
-}
-
-static int gmc_v9_0_hw_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool value;
-	int r, i;
-	u32 tmp;
-
-	/* The sequence of these two function calls matters.*/
-	gmc_v9_0_init_golden_registers(adev);
-
-	if (adev->mode_info.num_crtc) {
-		if (adev->asic_type != CHIP_ARCTURUS) {
-			/* Lockout access through VGA aperture*/
-			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
-			/* disable VGA render */
-			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-		}
-	}
-
-	amdgpu_device_program_register_sequence(adev,
-						golden_settings_vega10_hdp,
-						ARRAY_SIZE(golden_settings_vega10_hdp));
-
-	switch (adev->asic_type) {
-	case CHIP_RAVEN:
-		/* TODO for renoir */
-		mmhub_v1_0_update_power_gating(adev, true);
-		break;
-	case CHIP_ARCTURUS:
-		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
-		break;
-	default:
-		break;
-	}
-
 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
 
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
@@ -1210,7 +1361,7 @@ static int gmc_v9_0_hw_init(void *handle)
 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 
 	/* After HDP is initialized, flush HDP.*/
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
@@ -1226,8 +1377,28 @@ static int gmc_v9_0_hw_init(void *handle)
 	for (i = 0; i < adev->num_vmhubs; ++i)
 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
 
-	if (adev->umc.funcs && adev->umc.funcs->init_registers)
-		adev->umc.funcs->init_registers(adev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+	adev->gart.ready = true;
+	return 0;
+}
+
+static int gmc_v9_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* The sequence of these two function calls matters.*/
+	gmc_v9_0_init_golden_registers(adev);
+
+	if (adev->mode_info.num_crtc) {
+		/* Lockout access through VGA aperture*/
+		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+
+		/* disable VGA render */
+		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+	}
+
 	r = gmc_v9_0_gart_enable(adev);
 
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -206,8 +206,6 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
 }
 
@@ -618,6 +616,5 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
-	.ras_late_init = amdgpu_mmhub_ras_late_init,
 	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
 };
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,25 +31,20 @@
 
 #include "soc15_common.h"
 
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				uint64_t page_table_base)
+static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
-	/* two registers distance between mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */
-	int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
-			- mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
 
-	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-			    offset * vmid, lower_32_bits(page_table_base));
+	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+		     lower_32_bits(value));
 
-	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-			    offset * vmid, upper_32_bits(page_table_base));
+	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+		     upper_32_bits(value));
 }
 
 static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
-	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
+	mmhub_v2_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
@@ -142,15 +137,6 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 
 	tmp = mmMMVM_L2_CNTL3_DEFAULT;
-	if (adev->gmc.translate_further) {
-		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
-		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
-				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
-	} else {
-		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
-		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
-				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
-	}
 	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 
 	tmp = mmMMVM_L2_CNTL4_DEFAULT;
@@ -166,8 +152,6 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
 }
 
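The removed mmhub_v2_0_setup_vm_pt_regs addressed any VM context's page-table base by scaling a fixed register stride by vmid; the reverted code only ever programs context 0. A sketch of the stride computation against a fake register file (register indices here are invented, standing in for the mmMMVM_CONTEXT*_PAGE_TABLE_BASE_ADDR offsets):

#include <stdint.h>
#include <stdio.h>

#define CTX0_PT_BASE_LO 0x100 /* fake index for context 0's LO register */
#define CTX1_PT_BASE_LO 0x102 /* fake index for context 1's LO register */

static uint32_t regs[0x200];

static void write_reg(uint32_t reg, uint32_t val)
{
	regs[reg] = val;
}

/* Two registers separate consecutive contexts, so context N's register
 * is base + stride * N, the same trick as the removed
 * WREG32_SOC15_OFFSET call sites. */
static void setup_vm_pt_regs(uint32_t vmid, uint64_t page_table_base)
{
	uint32_t stride = CTX1_PT_BASE_LO - CTX0_PT_BASE_LO;

	write_reg(CTX0_PT_BASE_LO + stride * vmid,
		  (uint32_t)(page_table_base & 0xffffffffu));
	write_reg(CTX0_PT_BASE_LO + stride * vmid + 1,
		  (uint32_t)(page_table_base >> 32));
}

int main(void)
{
	setup_vm_pt_regs(3, 0x123456789abcull);
	printf("ctx3 lo=0x%x hi=0x%x\n", regs[0x106], regs[0x107]);
	return 0;
}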
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,7 +31,5 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
 int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
 			       enum amd_clockgating_state state);
 void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				uint64_t page_table_base);
 
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -240,8 +240,6 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
 	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 }
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	navi10_ih_disable_interrupts(adev);
 
-	adev->nbio.funcs->ih_control(adev);
+	adev->nbio_funcs->ih_control(adev);
 
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	}
 	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
 
-	adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
+	adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
 					    ih->doorbell_index);
 
 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,6 +24,7 @@
 #include "nv.h"
 
 #include "soc15_common.h"
+#include "soc15_hw_ip.h"
 #include "navi10_ip_offset.h"
 
 int navi10_reg_base_init(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -24,6 +24,7 @@
 #include "nv.h"
 
 #include "soc15_common.h"
+#include "soc15_hw_ip.h"
 #include "navi12_ip_offset.h"
 
 int navi12_reg_base_init(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -24,6 +24,7 @@
 #include "nv.h"
 
 #include "soc15_common.h"
+#include "soc15_hw_ip.h"
 #include "navi14_ip_offset.h"
 
 int navi14_reg_base_init(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,21 +27,11 @@
 #include "nbio/nbio_2_3_default.h"
 #include "nbio/nbio_2_3_offset.h"
 #include "nbio/nbio_2_3_sh_mask.h"
-#include <uapi/linux/kfd_ioctl.h>
 
 #define smnPCIE_CONFIG_CNTL	0x11180044
 #define smnCPM_CONTROL		0x11180460
 #define smnPCIE_CNTL2		0x11180070
 
-static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
-{
-	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
-		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
-	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
-		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
-}
-
 static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -66,9 +56,10 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
 				struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg)
-		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+		WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 	else
-		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 
 static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -320,6 +311,7 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
+	.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
 	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
 	.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -339,5 +331,4 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.ih_control = nbio_v2_3_ih_control,
 	.init_registers = nbio_v2_3_init_registers,
 	.detect_hw_virt = nbio_v2_3_detect_hw_virt,
-	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 };
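Both versions of nbio_v2_3_hdp_flush above pick between an immediate MMIO write (no ring, or a ring that cannot emit register writes) and queuing the write on the ring; only the target register differs across the revert. A small sketch of that dispatch shape, with an invented miniature of the ring abstraction and a placeholder register offset:

#include <stdint.h>
#include <stdio.h>

struct ring {
	void (*emit_wreg)(struct ring *ring, uint32_t reg, uint32_t val);
};

static void emit_wreg(struct ring *ring, uint32_t reg, uint32_t val)
{
	(void)ring;
	printf("queued write reg 0x%x = %u\n", reg, val);
}

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("direct write reg 0x%x = %u\n", reg, val);
}

#define HDP_FLUSH_REG 0x3fc0 /* placeholder offset */

/* Same branch structure as nbio_v2_3_hdp_flush(): fall back to a direct
 * register write when there is no ring to emit through. */
static void hdp_flush(struct ring *ring)
{
	if (!ring || !ring->emit_wreg)
		mmio_write(HDP_FLUSH_REG, 0);
	else
		ring->emit_wreg(ring, HDP_FLUSH_REG, 0);
}

int main(void)
{
	struct ring r = { .emit_wreg = emit_wreg };

	hdp_flush(NULL); /* direct path */
	hdp_flush(&r);   /* ring path */
	return 0;
}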
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,7 +26,6 @@
 
 #include "soc15_common.h"
 
-extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
 
 #endif
Some files were not shown because too many files have changed in this diff.