Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
More new stuff for 4.15. Highlights:

- Add clock query interface for raven
- Add new FENCE_TO_HANDLE ioctl
- UVD video encode ring support on polaris
- transparent huge page DMA support
- deadlock fixes
- compute pipe lru tweaks
- powerplay cleanups and regression fixes
- fix duplicate symbol issue with radeon and amdgpu
- misc bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (72 commits)
  drm/radeon/dp: make radeon_dp_get_dp_link_config static
  drm/radeon: move ci_send_msg_to_smc to where it's used
  drm/amd/sched: fix deadlock caused by unsignaled fences of deleted jobs
  drm/amd/sched: NULL out the s_fence field after run_job
  drm/amd/sched: move adding finish callback to amd_sched_job_begin
  drm/amd/sched: fix an outdated comment
  drm/amd/sched: rename amd_sched_entity_pop_job
  drm/amdgpu: minor coding style fix
  drm/ttm: add transparent huge page support for DMA allocations v2
  drm/ttm: add support for different pool sizes
  drm/ttm: remove unused options from ttm_mem_global_alloc_page
  drm/amdgpu: add uvd enc irq
  drm/amdgpu: add uvd enc ib test
  drm/amdgpu: add uvd enc ring test
  drm/amdgpu: add uvd enc vm functions (v2)
  drm/amdgpu: add uvd enc into run queue
  drm/amdgpu: add uvd enc rings
  drm/amdgpu: add new uvd enc ring methods
  drm/amdgpu: add uvd enc command in header
  drm/amdgpu: add uvd enc registers in header
  ...
commit bb7a9c8d71
@@ -121,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -1310,6 +1311,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1524,7 +1527,6 @@ struct amdgpu_device {

/* powerplay */
struct amd_powerplay powerplay;
bool pp_enabled;
bool pp_force_state_enabled;

/* dpm */
@@ -338,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
bool valid_wptr = false;

m = get_mqd(mqd);

@@ -356,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

if (read_user_wptr(mm, wptr, wptr_val))
/* read_user_ptr may take the mm->mmap_sem.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(kgd);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(kgd, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
@@ -292,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
bool valid_wptr = false;

m = get_mqd(mqd);

@@ -339,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

if (read_user_wptr(mm, wptr, wptr_val))
/* read_user_ptr may take the mm->mmap_sem.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(kgd);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(kgd, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
@@ -42,6 +42,28 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev

static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
int (*call_back_func)(struct amd_pp_init *, void **))
{
CGS_FUNC_ADEV;
struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;

if (call_back_func == NULL)
return NULL;

amd_pp = &(adev->powerplay);
pp_init.chip_family = adev->family;
pp_init.chip_id = adev->asic_type;
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
pp_init.feature_mask = amdgpu_pp_feature_mask;
pp_init.device = cgs_device;
if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
return NULL;

return adev->powerplay.pp_handle;
}

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
@@ -1179,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
.register_pp_handle = amdgpu_cgs_register_pp_handle,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
@@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -1330,6 +1331,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_fence_to_handle *info = data;
struct dma_fence *fence;
struct drm_syncobj *syncobj;
struct sync_file *sync_file;
int fd, r;

if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;

fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
if (IS_ERR(fence))
return PTR_ERR(fence);

switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
drm_syncobj_put(syncobj);
return r;

case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
drm_syncobj_put(syncobj);
return r;

case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
dma_fence_put(fence);
return fd;
}

sync_file = sync_file_create(fence);
dma_fence_put(fence);
if (!sync_file) {
put_unused_fd(fd);
return -ENOMEM;
}

fd_install(fd, sync_file->file);
info->out.handle = fd;
return 0;

default:
return -EINVAL;
}
}

/**
* amdgpu_cs_wait_all_fence - wait on all fences to signal
*
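The amdgpu_cs_fence_to_handle_ioctl() added above converts a command-submission fence into a syncobj handle, a syncobj fd, or a sync_file fd, selected by info->in.what. As a rough illustration only, userspace might drive the sync_file path as in the sketch below; the DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE macro, the drm_amdgpu_fence field names, and the header path follow the uapi change that accompanies this series, which is not among the hunks shown here, so treat them as assumptions.

	/* Hedged sketch: turn an amdgpu submission fence into a pollable sync_file fd. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/amdgpu_drm.h>	/* uapi header; install path may differ (e.g. libdrm) */

	static int amdgpu_fence_to_sync_file(int drm_fd, uint32_t ctx_id,
					     uint32_t ring, uint64_t seq_no)
	{
		union drm_amdgpu_fence_to_handle args;

		memset(&args, 0, sizeof(args));
		args.in.fence.ctx_id = ctx_id;		/* context the CS was submitted on */
		args.in.fence.ip_type = AMDGPU_HW_IP_GFX;
		args.in.fence.ring = ring;
		args.in.fence.seq_no = seq_no;		/* sequence number returned by the CS ioctl */
		args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

		if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args) < 0)
			return -1;

		return (int)args.out.handle;		/* sync_file fd installed by the kernel */
	}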
@ -56,6 +56,7 @@
|
||||
#include "amdgpu_vf_error.h"
|
||||
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_pm.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
|
||||
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
|
||||
@ -1603,6 +1604,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
|
||||
return r;
|
||||
}
|
||||
adev->ip_blocks[i].status.sw = true;
|
||||
|
||||
/* need to do gmc hw init early so we can allocate gpu mem */
|
||||
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
|
||||
r = amdgpu_vram_scratch_init(adev);
|
||||
@ -1633,6 +1635,11 @@ static int amdgpu_init(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&adev->firmware.mutex);
|
||||
if (amdgpu_ucode_init_bo(adev))
|
||||
adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
|
||||
mutex_unlock(&adev->firmware.mutex);
|
||||
|
||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||
if (!adev->ip_blocks[i].status.sw)
|
||||
continue;
|
||||
@ -1768,6 +1775,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
|
||||
|
||||
adev->ip_blocks[i].status.hw = false;
|
||||
}
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
|
||||
amdgpu_ucode_fini_bo(adev);
|
||||
|
||||
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
|
||||
if (!adev->ip_blocks[i].status.sw)
|
||||
@ -2040,6 +2049,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
mutex_init(&adev->srbm_mutex);
|
||||
mutex_init(&adev->grbm_idx_mutex);
|
||||
mutex_init(&adev->mn_lock);
|
||||
mutex_init(&adev->virt.vf_errors.lock);
|
||||
hash_init(adev->mn_hash);
|
||||
|
||||
amdgpu_check_arguments(adev);
|
||||
@ -2125,7 +2135,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_atombios_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -2136,7 +2146,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
if (amdgpu_vpost_needed(adev)) {
|
||||
if (!adev->bios) {
|
||||
dev_err(adev->dev, "no vBIOS found\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
|
||||
r = -EINVAL;
|
||||
goto failed;
|
||||
}
|
||||
@ -2144,7 +2154,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "gpu post error!\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
} else {
|
||||
@ -2156,7 +2166,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_atomfirmware_get_clock_info(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
} else {
|
||||
@ -2164,7 +2174,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_atombios_get_clock_info(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
/* init i2c buses */
|
||||
@ -2175,7 +2185,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_fence_driver_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -2185,7 +2195,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_init failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
|
||||
amdgpu_fini(adev);
|
||||
goto failed;
|
||||
}
|
||||
@ -2205,7 +2215,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_ib_pool_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -2215,6 +2225,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
|
||||
amdgpu_fbdev_init(adev);
|
||||
|
||||
r = amdgpu_pm_sysfs_init(adev);
|
||||
if (r)
|
||||
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
|
||||
|
||||
r = amdgpu_gem_debugfs_init(adev);
|
||||
if (r)
|
||||
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
|
||||
@ -2254,7 +2268,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
r = amdgpu_late_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_late_init failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -2311,6 +2325,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
||||
iounmap(adev->rmmio);
|
||||
adev->rmmio = NULL;
|
||||
amdgpu_doorbell_fini(adev);
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
amdgpu_debugfs_regs_cleanup(adev);
|
||||
}
|
||||
|
||||
@ -2936,7 +2951,7 @@ out:
|
||||
}
|
||||
} else {
|
||||
dev_err(adev->dev, "asic resume failed (%d).\n", r);
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
if (adev->rings[i] && adev->rings[i]->sched.thread) {
|
||||
kthread_unpark(adev->rings[i]->sched.thread);
|
||||
@ -2950,7 +2965,7 @@ out:
|
||||
if (r) {
|
||||
/* bad news, how to tell it to userspace ? */
|
||||
dev_info(adev->dev, "GPU reset failed\n");
|
||||
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
|
||||
}
|
||||
else {
|
||||
dev_info(adev->dev, "GPU reset successed!\n");
|
||||
|
@ -356,6 +356,10 @@ enum amdgpu_pcie_gen {
|
||||
((adev)->powerplay.pp_funcs->switch_power_profile(\
|
||||
(adev)->powerplay.pp_handle, type))
|
||||
|
||||
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
|
||||
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
|
||||
(adev)->powerplay.pp_handle, msg_id))
|
||||
|
||||
struct amdgpu_dpm {
|
||||
struct amdgpu_ps *ps;
|
||||
/* number of valid power states */
|
||||
|
@@ -70,9 +70,10 @@
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
* - 3.20.0 - Add support for local BOs
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_MINOR 21
#define KMS_DRIVER_PATCHLEVEL 0

int amdgpu_vram_limit = 0;
@@ -122,6 +123,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -265,6 +267,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);

MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

#ifdef CONFIG_DRM_AMDGPU_SI

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
}
}

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}

/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
return false;

return adev->gfx.mec.num_mec > 1;
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);

/* policy for amdgpu compute queue ownership */
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;

/* FIXME: spreading the queues across pipes causes perf regressions */
if (0) {
if (multipipe_policy) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
@@ -1024,6 +1024,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@ -64,10 +64,6 @@ static const struct cg_flag_name clocks[] = {
|
||||
|
||||
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->pp_enabled)
|
||||
/* TODO */
|
||||
return;
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (power_supply_is_system_supplied() > 0)
|
||||
@ -118,7 +114,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (adev->pp_enabled) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
|
||||
} else {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
@ -303,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
|
||||
|
||||
if (strlen(buf) == 1)
|
||||
adev->pp_force_state_enabled = false;
|
||||
else if (adev->pp_enabled) {
|
||||
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
|
||||
adev->powerplay.pp_funcs->get_pp_num_states) {
|
||||
struct pp_states_info data;
|
||||
|
||||
ret = kstrtoul(buf, 0, &idx);
|
||||
@ -531,7 +528,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
|
||||
if (adev->powerplay.pp_funcs->set_sclk_od)
|
||||
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->pp_enabled) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
@ -575,7 +572,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
|
||||
if (adev->powerplay.pp_funcs->set_mclk_od)
|
||||
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->pp_enabled) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
@ -959,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
if (adev->pp_enabled)
|
||||
return effective_mode;
|
||||
|
||||
/* Skip fan attributes if fan is not present */
|
||||
if (adev->pm.no_fan &&
|
||||
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
|
||||
@ -1317,6 +1311,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
|
||||
if (adev->pm.sysfs_initialized)
|
||||
return 0;
|
||||
|
||||
if (adev->pm.dpm_enabled == 0)
|
||||
return 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_temperature == NULL)
|
||||
return 0;
|
||||
|
||||
@ -1341,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (adev->pp_enabled) {
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_num_states\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_cur_state\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_force_state\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_table);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_table\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_num_states\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_cur_state\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_force_state\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_table);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create device file pp_table\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
|
||||
@ -1417,16 +1413,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
|
||||
|
||||
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->pm.dpm_enabled == 0)
|
||||
return;
|
||||
|
||||
if (adev->pm.int_hwmon_dev)
|
||||
hwmon_device_unregister(adev->pm.int_hwmon_dev);
|
||||
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
|
||||
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
|
||||
if (adev->pp_enabled) {
|
||||
device_remove_file(adev->dev, &dev_attr_pp_num_states);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_force_state);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_table);
|
||||
}
|
||||
|
||||
device_remove_file(adev->dev, &dev_attr_pp_num_states);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_force_state);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_table);
|
||||
|
||||
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
|
||||
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
|
||||
@ -1457,7 +1456,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
|
||||
amdgpu_fence_wait_empty(ring);
|
||||
}
|
||||
|
||||
if (adev->pp_enabled) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
|
||||
} else {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
@ -1592,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
|
||||
if ((adev->flags & AMD_IS_PX) &&
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
|
||||
seq_printf(m, "PX asic powered off\n");
|
||||
} else if (adev->pp_enabled) {
|
||||
return amdgpu_debugfs_pm_info_pp(m, adev);
|
||||
} else {
|
||||
} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
|
||||
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
|
||||
else
|
||||
seq_printf(m, "Debugfs support not implemented for this asic\n");
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
} else {
|
||||
return amdgpu_debugfs_pm_info_pp(m, adev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -34,24 +34,6 @@
|
||||
#include "cik_dpm.h"
|
||||
#include "vi_dpm.h"
|
||||
|
||||
static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amd_pp_init pp_init;
|
||||
struct amd_powerplay *amd_pp;
|
||||
int ret;
|
||||
|
||||
amd_pp = &(adev->powerplay);
|
||||
pp_init.chip_family = adev->family;
|
||||
pp_init.chip_id = adev->asic_type;
|
||||
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
|
||||
pp_init.feature_mask = amdgpu_pp_feature_mask;
|
||||
pp_init.device = amdgpu_cgs_create_device(adev);
|
||||
ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_pp_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
|
||||
int ret = 0;
|
||||
|
||||
amd_pp = &(adev->powerplay);
|
||||
adev->pp_enabled = false;
|
||||
amd_pp->pp_handle = (void *)adev;
|
||||
|
||||
switch (adev->asic_type) {
|
||||
@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
|
||||
case CHIP_STONEY:
|
||||
case CHIP_VEGA10:
|
||||
case CHIP_RAVEN:
|
||||
adev->pp_enabled = true;
|
||||
if (amdgpu_create_pp_handle(adev))
|
||||
return -EINVAL;
|
||||
amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
|
||||
amd_pp->ip_funcs = &pp_ip_funcs;
|
||||
amd_pp->pp_funcs = &pp_dpm_funcs;
|
||||
break;
|
||||
@ -97,9 +76,7 @@ static int amdgpu_pp_early_init(void *handle)
|
||||
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
|
||||
amd_pp->pp_funcs = &ci_dpm_funcs;
|
||||
} else {
|
||||
adev->pp_enabled = true;
|
||||
if (amdgpu_create_pp_handle(adev))
|
||||
return -EINVAL;
|
||||
amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
|
||||
amd_pp->ip_funcs = &pp_ip_funcs;
|
||||
amd_pp->pp_funcs = &pp_dpm_funcs;
|
||||
}
|
||||
@ -118,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
|
||||
|
||||
if (adev->powerplay.ip_funcs->early_init)
|
||||
ret = adev->powerplay.ip_funcs->early_init(
|
||||
adev->powerplay.pp_handle);
|
||||
amd_pp->cgs_device ? amd_pp->cgs_device :
|
||||
amd_pp->pp_handle);
|
||||
|
||||
if (ret == PP_DPM_DISABLED) {
|
||||
adev->pm.dpm_enabled = false;
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -137,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
|
||||
ret = adev->powerplay.ip_funcs->late_init(
|
||||
adev->powerplay.pp_handle);
|
||||
|
||||
if (adev->pp_enabled && adev->pm.dpm_enabled) {
|
||||
amdgpu_pm_sysfs_init(adev);
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -176,21 +145,11 @@ static int amdgpu_pp_hw_init(void *handle)
|
||||
int ret = 0;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
|
||||
amdgpu_ucode_init_bo(adev);
|
||||
|
||||
if (adev->powerplay.ip_funcs->hw_init)
|
||||
ret = adev->powerplay.ip_funcs->hw_init(
|
||||
adev->powerplay.pp_handle);
|
||||
|
||||
if (ret == PP_DPM_DISABLED) {
|
||||
adev->pm.dpm_enabled = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
|
||||
adev->pm.dpm_enabled = true;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -199,16 +158,10 @@ static int amdgpu_pp_hw_fini(void *handle)
|
||||
int ret = 0;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (adev->pp_enabled && adev->pm.dpm_enabled)
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
|
||||
if (adev->powerplay.ip_funcs->hw_fini)
|
||||
ret = adev->powerplay.ip_funcs->hw_fini(
|
||||
adev->powerplay.pp_handle);
|
||||
|
||||
if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
|
||||
amdgpu_ucode_fini_bo(adev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -220,9 +173,8 @@ static void amdgpu_pp_late_fini(void *handle)
|
||||
adev->powerplay.ip_funcs->late_fini(
|
||||
adev->powerplay.pp_handle);
|
||||
|
||||
|
||||
if (adev->pp_enabled)
|
||||
amd_powerplay_destroy(adev->powerplay.pp_handle);
|
||||
if (adev->powerplay.cgs_device)
|
||||
amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
|
||||
}
|
||||
|
||||
static int amdgpu_pp_suspend(void *handle)
|
||||
|
@ -411,13 +411,6 @@ static int psp_hw_init(void *handle)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&adev->firmware.mutex);
|
||||
/*
|
||||
* This sequence is just used on hw_init only once, no need on
|
||||
* resume.
|
||||
*/
|
||||
ret = amdgpu_ucode_init_bo(adev);
|
||||
if (ret)
|
||||
goto failed;
|
||||
|
||||
ret = psp_load_fw(adev);
|
||||
if (ret) {
|
||||
@ -442,8 +435,6 @@ static int psp_hw_fini(void *handle)
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
|
||||
return 0;
|
||||
|
||||
amdgpu_ucode_fini_bo(adev);
|
||||
|
||||
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
|
||||
|
||||
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
|
||||
|
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)

static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
int user_ring,
int user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
}

r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
j, out_ring);
j, lru_pipe_order, out_ring);
if (r)
return r;

@@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
break;
case AMDGPU_HW_IP_COMPUTE:
r = amdgpu_lru_map(adev, mapper, ring, out_ring);
r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break;
default:
*out_ring = NULL;
@@ -315,14 +315,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist
* @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring
*
* Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
int num_blacklist, struct amdgpu_ring **ring)
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring)
{
struct amdgpu_ring *entry;

@@ -337,10 +339,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue;

*ring = entry;
amdgpu_ring_lru_touch_locked(adev, *ring);
break;
if (!*ring) {
*ring = entry;

/* We are done for ring LRU */
if (!lru_pipe_order)
break;
}

/* Move all rings on the same pipe to the end of the list */
if (entry->pipe == (*ring)->pipe)
amdgpu_ring_lru_touch_locked(adev, entry);
}

/* Move the ring we found to the end of the list */
if (*ring)
amdgpu_ring_lru_touch_locked(adev, *ring);

spin_unlock(&adev->ring_lru_list_lock);

if (!*ring) {
@@ -201,8 +201,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
int num_blacklist, struct amdgpu_ring **ring);
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
@ -25,30 +25,21 @@
|
||||
#include "amdgpu_vf_error.h"
|
||||
#include "mxgpu_ai.h"
|
||||
|
||||
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
|
||||
|
||||
/* struct error_entry - amdgpu VF error information. */
|
||||
struct amdgpu_vf_error_buffer {
|
||||
int read_count;
|
||||
int write_count;
|
||||
uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
};
|
||||
|
||||
struct amdgpu_vf_error_buffer admgpu_vf_errors;
|
||||
|
||||
|
||||
void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
|
||||
void amdgpu_vf_error_put(struct amdgpu_device *adev,
|
||||
uint16_t sub_error_code,
|
||||
uint16_t error_flags,
|
||||
uint64_t error_data)
|
||||
{
|
||||
int index;
|
||||
uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
|
||||
|
||||
index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
admgpu_vf_errors.code [index] = error_code;
|
||||
admgpu_vf_errors.flags [index] = error_flags;
|
||||
admgpu_vf_errors.data [index] = error_data;
|
||||
admgpu_vf_errors.write_count ++;
|
||||
mutex_lock(&adev->virt.vf_errors.lock);
|
||||
index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
adev->virt.vf_errors.code [index] = error_code;
|
||||
adev->virt.vf_errors.flags [index] = error_flags;
|
||||
adev->virt.vf_errors.data [index] = error_data;
|
||||
adev->virt.vf_errors.write_count ++;
|
||||
mutex_unlock(&adev->virt.vf_errors.lock);
|
||||
}
|
||||
|
||||
|
||||
@ -58,7 +49,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
|
||||
u32 data1, data2, data3;
|
||||
int index;
|
||||
|
||||
if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
|
||||
if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
|
||||
(!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
|
||||
return;
|
||||
}
|
||||
/*
|
||||
@ -68,18 +60,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
|
||||
return;
|
||||
}
|
||||
*/
|
||||
|
||||
mutex_lock(&adev->virt.vf_errors.lock);
|
||||
/* The errors are overlay of array, correct read_count as full. */
|
||||
if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
|
||||
admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
|
||||
adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
}
|
||||
|
||||
while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
|
||||
index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
|
||||
data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
|
||||
data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
|
||||
while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
|
||||
index =adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
|
||||
data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
|
||||
adev->virt.vf_errors.flags[index]);
|
||||
data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
|
||||
data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
|
||||
|
||||
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
|
||||
admgpu_vf_errors.read_count ++;
|
||||
adev->virt.vf_errors.read_count ++;
|
||||
}
|
||||
mutex_unlock(&adev->virt.vf_errors.lock);
|
||||
}
|
||||
|
@@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX
};

void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
void amdgpu_vf_error_put(struct amdgpu_device *adev,
uint16_t sub_error_code,
uint16_t error_flags,
uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);

#endif /* __VF_ERROR_H__ */
@ -36,6 +36,18 @@ struct amdgpu_mm_table {
|
||||
uint64_t gpu_addr;
|
||||
};
|
||||
|
||||
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
|
||||
|
||||
/* struct error_entry - amdgpu VF error information. */
|
||||
struct amdgpu_vf_error_buffer {
|
||||
struct mutex lock;
|
||||
int read_count;
|
||||
int write_count;
|
||||
uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct amdgpu_virt_ops - amdgpu device virt operations
|
||||
*/
|
||||
@ -59,6 +71,7 @@ struct amdgpu_virt {
|
||||
struct work_struct flr_work;
|
||||
struct amdgpu_mm_table mm_table;
|
||||
const struct amdgpu_virt_ops *ops;
|
||||
struct amdgpu_vf_error_buffer vf_errors;
|
||||
};
|
||||
|
||||
#define AMDGPU_CSA_SIZE (8 * 1024)
|
||||
|
@ -2541,7 +2541,8 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
|
||||
* @adev: amdgpu_device pointer
|
||||
* @fragment_size_default: the default fragment size if it's set auto
|
||||
*/
|
||||
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
|
||||
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
|
||||
uint32_t fragment_size_default)
|
||||
{
|
||||
if (amdgpu_vm_fragment_size == -1)
|
||||
adev->vm_manager.fragment_size = fragment_size_default;
|
||||
@ -2555,7 +2556,8 @@ void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_s
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm_size: the default vm size if it's set auto
|
||||
*/
|
||||
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
|
||||
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
|
||||
uint32_t fragment_size_default)
|
||||
{
|
||||
/* adjust vm size firstly */
|
||||
if (amdgpu_vm_size == -1)
|
||||
@ -2682,6 +2684,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
}
|
||||
|
||||
INIT_KFIFO(vm->faults);
|
||||
vm->fault_credit = 16;
|
||||
|
||||
return 0;
|
||||
|
||||
@ -2776,6 +2779,36 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
amdgpu_vm_free_reserved_vmid(adev, vm, i);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @pasid: PASID do identify the VM
|
||||
*
|
||||
* This function is expected to be called in interrupt context. Returns
|
||||
* true if there was fault credit, false otherwise
|
||||
*/
|
||||
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
|
||||
unsigned int pasid)
|
||||
{
|
||||
struct amdgpu_vm *vm;
|
||||
|
||||
spin_lock(&adev->vm_manager.pasid_lock);
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
if (!vm)
|
||||
/* VM not found, can't track fault credit */
|
||||
return true;
|
||||
|
||||
/* No lock needed. only accessed by IRQ handler */
|
||||
if (!vm->fault_credit)
|
||||
/* Too many faults in this VM */
|
||||
return false;
|
||||
|
||||
vm->fault_credit--;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_manager_init - init the VM manager
|
||||
*
|
||||
|
@@ -165,8 +165,11 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;

/* Up to 128 pending page faults */
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);

/* Limit non-retry fault storms */
unsigned int fault_credit;
};

struct amdgpu_vm_id {
@@ -244,6 +247,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
@ -6365,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
|
||||
flush_work(&adev->pm.dpm.thermal.work);
|
||||
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
ci_dpm_fini(adev);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
|
||||
|
@ -237,8 +237,23 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
|
||||
*/
|
||||
static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
|
||||
{
|
||||
/* Process all interrupts */
|
||||
return true;
|
||||
u32 ring_index = adev->irq.ih.rptr >> 2;
|
||||
u16 pasid;
|
||||
|
||||
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
|
||||
case 146:
|
||||
case 147:
|
||||
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
|
||||
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
/* Not a VM fault */
|
||||
return true;
|
||||
}
|
||||
|
||||
adev->irq.ih.rptr += 16;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -216,8 +216,23 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
|
||||
*/
|
||||
static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
|
||||
{
|
||||
/* Process all interrupts */
|
||||
return true;
|
||||
u32 ring_index = adev->irq.ih.rptr >> 2;
|
||||
u16 pasid;
|
||||
|
||||
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
|
||||
case 146:
|
||||
case 147:
|
||||
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
|
||||
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
/* Not a VM fault */
|
||||
return true;
|
||||
}
|
||||
|
||||
adev->irq.ih.rptr += 16;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4132,18 +4132,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
|
||||
gfx_v8_0_rlc_reset(adev);
|
||||
gfx_v8_0_init_pg(adev);
|
||||
|
||||
if (!adev->pp_enabled) {
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
|
||||
/* legacy rlc firmware loading */
|
||||
r = gfx_v8_0_rlc_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
} else {
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_RLC_G);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
|
||||
/* legacy rlc firmware loading */
|
||||
r = gfx_v8_0_rlc_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
gfx_v8_0_rlc_start(adev);
|
||||
@ -4959,43 +4953,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
|
||||
|
||||
if (!adev->pp_enabled) {
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
|
||||
/* legacy firmware loading */
|
||||
r = gfx_v8_0_cp_gfx_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
r = gfx_v8_0_cp_gfx_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = gfx_v8_0_cp_compute_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
} else {
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_CP_CE);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_CP_PFP);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_CP_ME);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->asic_type == CHIP_TOPAZ) {
|
||||
r = gfx_v8_0_cp_compute_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
} else {
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_CP_MEC1);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
r = gfx_v8_0_cp_compute_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
r = gfx_v8_0_cp_gfx_resume(adev);
|
||||
@ -6018,7 +5984,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
{
|
||||
uint32_t msg_id, pp_state = 0;
|
||||
uint32_t pp_support_state = 0;
|
||||
void *pp_handle = adev->powerplay.pp_handle;
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
|
||||
@ -6036,7 +6001,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_CG,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
|
||||
@ -6057,7 +6023,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_MG,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -6069,7 +6036,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
|
||||
uint32_t msg_id, pp_state = 0;
|
||||
uint32_t pp_support_state = 0;
|
||||
void *pp_handle = adev->powerplay.pp_handle;
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
|
||||
@ -6087,7 +6053,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_CG,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
|
||||
@ -6106,7 +6073,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_3D,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
|
||||
@ -6127,7 +6095,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_MG,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
|
||||
@ -6142,7 +6111,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_RLC,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
|
||||
@ -6156,7 +6126,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
|
||||
PP_BLOCK_GFX_CP,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -7076,7 +7047,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
|
||||
{
|
||||
uint64_t ce_payload_addr;
|
||||
int cnt_ce;
|
||||
static union {
|
||||
union {
|
||||
struct vi_ce_ib_state regular;
|
||||
struct vi_ce_ib_state_chained_ib chained;
|
||||
} ce_payload = {};
|
||||
@ -7105,7 +7076,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
|
||||
{
|
||||
uint64_t de_payload_addr, gds_addr, csa_addr;
|
||||
int cnt_de;
|
||||
static union {
|
||||
union {
|
||||
struct vi_de_ib_state regular;
|
||||
struct vi_de_ib_state_chained_ib chained;
|
||||
} de_payload = {};
|
||||
|
@ -3583,7 +3583,7 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
|
||||
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
|
||||
{
|
||||
u32 ref_and_mask, reg_mem_engine;
|
||||
struct nbio_hdp_flush_reg *nbio_hf_reg;
|
||||
const struct nbio_hdp_flush_reg *nbio_hf_reg;
|
||||
|
||||
if (ring->adev->flags & AMD_IS_APU)
|
||||
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
|
||||
@ -3806,7 +3806,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
|
||||
|
||||
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
|
||||
{
|
||||
static struct v9_ce_ib_state ce_payload = {0};
|
||||
struct v9_ce_ib_state ce_payload = {0};
|
||||
uint64_t csa_addr;
|
||||
int cnt;
|
||||
|
||||
@ -3825,7 +3825,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
|
||||
|
||||
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
|
||||
{
|
||||
static struct v9_de_ib_state de_payload = {0};
|
||||
struct v9_de_ib_state de_payload = {0};
|
||||
uint64_t csa_addr, gds_addr;
|
||||
int cnt;
|
||||
|
||||
|
@ -216,8 +216,23 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
|
||||
*/
|
||||
static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
|
||||
{
|
||||
/* Process all interrupts */
|
||||
return true;
|
||||
u32 ring_index = adev->irq.ih.rptr >> 2;
|
||||
u16 pasid;
|
||||
|
||||
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
|
||||
case 146:
|
||||
case 147:
|
||||
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
|
||||
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
/* Not a VM fault */
|
||||
return true;
|
||||
}
|
||||
|
||||
adev->irq.ih.rptr += 16;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2969,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
|
||||
{
|
||||
/* powerdown unused blocks for now */
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int ret;
|
||||
|
||||
if (!amdgpu_dpm)
|
||||
return 0;
|
||||
|
||||
/* init the sysfs and debugfs files late */
|
||||
ret = amdgpu_pm_sysfs_init(adev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
kv_dpm_powergate_acp(adev, true);
|
||||
kv_dpm_powergate_samu(adev, true);
|
||||
|
||||
@ -3040,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
|
||||
flush_work(&adev->pm.dpm.thermal.work);
|
||||
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
kv_dpm_fini(adev);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
|
||||
|
@ -215,31 +215,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
|
||||
*flags |= AMD_CG_SUPPORT_BIF_LS;
|
||||
}
|
||||
|
||||
struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
|
||||
struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
|
||||
const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
|
||||
.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
|
||||
.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
|
||||
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
|
||||
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
|
||||
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
|
||||
.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
|
||||
.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
|
||||
.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
|
||||
.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
|
||||
.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
|
||||
.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
|
||||
.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
|
||||
.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
|
||||
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
|
||||
};
|
||||
|
||||
int nbio_v6_1_init(struct amdgpu_device *adev)
|
||||
{
|
||||
nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
|
||||
nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
|
||||
nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
|
||||
|
||||
nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
|
||||
nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
|
||||
|
||||
return 0;
|
||||
}
|
||||
const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
|
||||
.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
|
||||
.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
|
||||
};
|
||||
|
||||
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
|
||||
{
|
||||
|
@@ -26,8 +26,8 @@

#include "soc15_common.h"

extern struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
int nbio_v6_1_init(struct amdgpu_device *adev);
u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
@ -185,28 +185,24 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
|
||||
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
|
||||
}
|
||||
|
||||
struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
|
||||
struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
|
||||
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
|
||||
.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
|
||||
.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
|
||||
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
|
||||
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
|
||||
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
|
||||
.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
|
||||
.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
|
||||
.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
|
||||
.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
|
||||
.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
|
||||
.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
|
||||
.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
|
||||
.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
|
||||
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
|
||||
};
|
||||
|
||||
int nbio_v7_0_init(struct amdgpu_device *adev)
|
||||
{
|
||||
nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
|
||||
nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
|
||||
nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
|
||||
|
||||
nbio_v7_0_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
|
||||
nbio_v7_0_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
|
||||
.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
|
||||
.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
|
||||
};
|
||||
|
@ -26,8 +26,8 @@

#include "soc15_common.h"

extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
int nbio_v7_0_init(struct amdgpu_device *adev);
u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
@ -561,21 +561,11 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!adev->pp_enabled) {
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
|
||||
r = sdma_v2_4_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
} else {
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_SDMA0);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
AMDGPU_UCODE_ID_SDMA1);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
|
||||
r = sdma_v2_4_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
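/* Only the direct (driver) load path still loads the SDMA microcode here;
 * when the SMU loads the firmware, the old check_fw_load_finish() polling
 * above is no longer required. */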
|
||||
|
||||
/* halt the engine before programming */
|
||||
|
@ -819,23 +819,12 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
|
||||
*/
|
||||
static int sdma_v3_0_start(struct amdgpu_device *adev)
|
||||
{
|
||||
int r, i;
|
||||
int r;
|
||||
|
||||
if (!adev->pp_enabled) {
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
|
||||
r = sdma_v3_0_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
} else {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
|
||||
(i == 0) ?
|
||||
AMDGPU_UCODE_ID_SDMA0 :
|
||||
AMDGPU_UCODE_ID_SDMA1);
|
||||
if (r)
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
|
||||
r = sdma_v3_0_load_microcode(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* disable sdma engine before programming it */
|
||||
|
@ -371,7 +371,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
struct nbio_hdp_flush_reg *nbio_hf_reg;
const struct nbio_hdp_flush_reg *nbio_hf_reg;

if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
@ -7604,11 +7604,6 @@ static int si_dpm_late_init(void *handle)
if (!amdgpu_dpm)
return 0;

/* init the sysfs and debugfs files late */
ret = amdgpu_pm_sysfs_init(adev);
if (ret)
return ret;

ret = si_set_temperature_range(adev);
if (ret)
return ret;
@ -7764,7 +7759,6 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);

mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);

@ -101,7 +101,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
struct nbio_pcie_index_data *nbio_pcie_id;
const struct nbio_pcie_index_data *nbio_pcie_id;
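/* PCIE registers are reached through an index/data register pair; the pair
 * lives at different offsets on vega10 (nbio v6.1) and raven (nbio v7.0),
 * hence the per-asic lookup table. */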

if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@ -122,7 +122,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
struct nbio_pcie_index_data *nbio_pcie_id;
const struct nbio_pcie_index_data *nbio_pcie_id;

if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@ -604,21 +604,6 @@ static int soc15_common_early_init(void *handle)
|
||||
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
|
||||
psp_enabled = true;
|
||||
|
||||
/*
|
||||
* nbio needs to be used by both sdma and gfx9, but is only
* initialized once
|
||||
*/
|
||||
switch(adev->asic_type) {
|
||||
case CHIP_VEGA10:
|
||||
nbio_v6_1_init(adev);
|
||||
break;
|
||||
case CHIP_RAVEN:
|
||||
nbio_v7_0_init(adev);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
adev->rev_id = soc15_get_rev_id(adev);
|
||||
adev->external_rev_id = 0xFF;
|
||||
switch (adev->asic_type) {
|
||||
|
@ -227,8 +227,23 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
|
||||
*/
|
||||
static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
|
||||
{
|
||||
/* Process all interrupts */
|
||||
return true;
|
||||
u32 ring_index = adev->irq.ih.rptr >> 2;
|
||||
u16 pasid;
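/* Only VM faults need prescreening; src_ids 146 and 147 are assumed to be
 * the VI VM fault sources, matching the gmc_v8_0 interrupt registration. */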
|
||||
|
||||
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
|
||||
case 146:
|
||||
case 147:
|
||||
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
|
||||
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
/* Not a VM fault */
|
||||
return true;
|
||||
}
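/* Fault filtered out: skip this 16-byte IV ring entry and tell the caller
 * not to process it. */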
|
||||
|
||||
adev->irq.ih.rptr += 16;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -38,6 +38,8 @@
|
||||
#include "vi.h"
|
||||
|
||||
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
|
||||
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
|
||||
|
||||
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int uvd_v6_0_start(struct amdgpu_device *adev);
|
||||
static void uvd_v6_0_stop(struct amdgpu_device *adev);
|
||||
@ -47,6 +49,18 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
|
||||
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
|
||||
bool enable);
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_support - get encode support status
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Returns the current hardware encode support status
|
||||
*/
|
||||
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
|
||||
{
|
||||
return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_ring_get_rptr - get read pointer
|
||||
*
|
||||
@ -61,6 +75,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
|
||||
return RREG32(mmUVD_RBC_RB_RPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_get_rptr - get enc read pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware enc read pointer
|
||||
*/
|
||||
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring == &adev->uvd.ring_enc[0])
|
||||
return RREG32(mmUVD_RB_RPTR);
|
||||
else
|
||||
return RREG32(mmUVD_RB_RPTR2);
|
||||
}
|
||||
/**
|
||||
* uvd_v6_0_ring_get_wptr - get write pointer
|
||||
*
|
||||
@ -75,6 +105,23 @@ static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
return RREG32(mmUVD_RBC_RB_WPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_get_wptr - get enc write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware enc write pointer
|
||||
*/
|
||||
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring == &adev->uvd.ring_enc[0])
|
||||
return RREG32(mmUVD_RB_WPTR);
|
||||
else
|
||||
return RREG32(mmUVD_RB_WPTR2);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_ring_set_wptr - set write pointer
|
||||
*
|
||||
@ -89,11 +136,247 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_set_wptr - set enc write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Commits the enc write pointer to the hardware
|
||||
*/
|
||||
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring == &adev->uvd.ring_enc[0])
|
||||
WREG32(mmUVD_RB_WPTR,
|
||||
lower_32_bits(ring->wptr));
|
||||
else
|
||||
WREG32(mmUVD_RB_WPTR2,
|
||||
lower_32_bits(ring->wptr));
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
|
||||
*
|
||||
* @ring: the engine to test on
|
||||
*
|
||||
*/
|
||||
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
uint32_t rptr = amdgpu_ring_get_rptr(ring);
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
r = amdgpu_ring_alloc(ring, 16);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
|
||||
ring->idx, r);
|
||||
return r;
|
||||
}
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
|
||||
amdgpu_ring_commit(ring);
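/* Once the hardware read pointer advances past its previous value, the END
 * packet has been consumed and the ring is alive. */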
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (amdgpu_ring_get_rptr(ring) != rptr)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
if (i < adev->usec_timeout) {
|
||||
DRM_INFO("ring test on %d succeeded in %d usecs\n",
|
||||
ring->idx, i);
|
||||
} else {
|
||||
DRM_ERROR("amdgpu: ring %d test failed\n",
|
||||
ring->idx);
|
||||
r = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @ring: ring we should submit the msg to
|
||||
* @handle: session handle to use
|
||||
* @fence: optional fence to return
|
||||
*
|
||||
* Open up a stream for HW test
|
||||
*/
|
||||
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
const unsigned ib_size_dw = 16;
|
||||
struct amdgpu_job *job;
|
||||
struct amdgpu_ib *ib;
|
||||
struct dma_fence *f = NULL;
|
||||
uint64_t dummy;
|
||||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
ib = &job->ibs[0];
|
||||
dummy = ib->gpu_addr + 1024;
|
||||
|
||||
ib->length_dw = 0;
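/* Layout note: each packet below starts with its size in bytes followed by a
 * type dword (see the inline comments); the dummy address appears to serve
 * as a scratch buffer inside the IB for the test stream. */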
|
||||
ib->ptr[ib->length_dw++] = 0x00000018;
|
||||
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
|
||||
ib->ptr[ib->length_dw++] = handle;
|
||||
ib->ptr[ib->length_dw++] = 0x00010000;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
|
||||
ib->ptr[ib->length_dw++] = dummy;
|
||||
|
||||
ib->ptr[ib->length_dw++] = 0x00000014;
|
||||
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
|
||||
ib->ptr[ib->length_dw++] = 0x0000001c;
|
||||
ib->ptr[ib->length_dw++] = 0x00000001;
|
||||
ib->ptr[ib->length_dw++] = 0x00000000;
|
||||
|
||||
ib->ptr[ib->length_dw++] = 0x00000008;
|
||||
ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
|
||||
|
||||
for (i = ib->length_dw; i < ib_size_dw; ++i)
|
||||
ib->ptr[i] = 0x0;
|
||||
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
|
||||
job->fence = dma_fence_get(f);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
amdgpu_job_free(job);
|
||||
if (fence)
|
||||
*fence = dma_fence_get(f);
|
||||
dma_fence_put(f);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
amdgpu_job_free(job);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @ring: ring we should submit the msg to
|
||||
* @handle: session handle to use
|
||||
* @fence: optional fence to return
|
||||
*
|
||||
* Close up a stream for HW test or if userspace failed to do so
|
||||
*/
|
||||
int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
||||
bool direct, struct dma_fence **fence)
|
||||
{
|
||||
const unsigned ib_size_dw = 16;
|
||||
struct amdgpu_job *job;
|
||||
struct amdgpu_ib *ib;
|
||||
struct dma_fence *f = NULL;
|
||||
uint64_t dummy;
|
||||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
ib = &job->ibs[0];
|
||||
dummy = ib->gpu_addr + 1024;
|
||||
|
||||
ib->length_dw = 0;
|
||||
ib->ptr[ib->length_dw++] = 0x00000018;
|
||||
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
|
||||
ib->ptr[ib->length_dw++] = handle;
|
||||
ib->ptr[ib->length_dw++] = 0x00010000;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
|
||||
ib->ptr[ib->length_dw++] = dummy;
|
||||
|
||||
ib->ptr[ib->length_dw++] = 0x00000014;
|
||||
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
|
||||
ib->ptr[ib->length_dw++] = 0x0000001c;
|
||||
ib->ptr[ib->length_dw++] = 0x00000001;
|
||||
ib->ptr[ib->length_dw++] = 0x00000000;
|
||||
|
||||
ib->ptr[ib->length_dw++] = 0x00000008;
|
||||
ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
|
||||
|
||||
for (i = ib->length_dw; i < ib_size_dw; ++i)
|
||||
ib->ptr[i] = 0x0;
|
||||
|
||||
if (direct) {
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
|
||||
job->fence = dma_fence_get(f);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
amdgpu_job_free(job);
|
||||
} else {
|
||||
r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
|
||||
if (r)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (fence)
|
||||
*fence = dma_fence_get(f);
|
||||
dma_fence_put(f);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
amdgpu_job_free(job);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
|
||||
*
|
||||
* @ring: the engine to test on
|
||||
*
|
||||
*/
|
||||
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
{
|
||||
struct dma_fence *fence = NULL;
|
||||
long r;
|
||||
|
||||
r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
|
||||
goto error;
|
||||
}
|
||||
|
||||
r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
|
||||
goto error;
|
||||
}
|
||||
|
||||
r = dma_fence_wait_timeout(fence, false, timeout);
|
||||
if (r == 0) {
|
||||
DRM_ERROR("amdgpu: IB test timed out.\n");
|
||||
r = -ETIMEDOUT;
|
||||
} else if (r < 0) {
|
||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||
} else {
|
||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
||||
r = 0;
|
||||
}
|
||||
error:
|
||||
dma_fence_put(fence);
|
||||
return r;
|
||||
}
|
||||
static int uvd_v6_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
uvd_v6_0_set_ring_funcs(adev);
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
adev->uvd.num_enc_rings = 2;
|
||||
uvd_v6_0_set_enc_ring_funcs(adev);
|
||||
}
|
||||
|
||||
uvd_v6_0_set_irq_funcs(adev);
|
||||
|
||||
return 0;
|
||||
@ -102,7 +385,7 @@ static int uvd_v6_0_early_init(void *handle)
|
||||
static int uvd_v6_0_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
int r;
|
||||
int i, r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/* UVD TRAP */
|
||||
@ -110,10 +393,31 @@ static int uvd_v6_0_sw_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* UVD ENC TRAP */
|
||||
if (uvd_v6_0_enc_support(adev)) {
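/* The enc rings use IH src_ids 119 and 120 (119 + ring index), which are the
 * cases handled in uvd_v6_0_process_interrupt() below. */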
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_uvd_sw_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
struct amd_sched_rq *rq;
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq, amdgpu_sched_jobs);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_uvd_resume(adev);
|
||||
if (r)
|
||||
return r;
|
||||
@ -121,19 +425,38 @@ static int uvd_v6_0_sw_init(void *handle)
|
||||
ring = &adev->uvd.ring;
|
||||
sprintf(ring->name, "uvd");
|
||||
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
|
||||
ring = &adev->uvd.ring_enc[i];
|
||||
sprintf(ring->name, "uvd_enc%d", i);
|
||||
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int uvd_v6_0_sw_fini(void *handle)
|
||||
{
|
||||
int r;
|
||||
int i, r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
r = amdgpu_uvd_suspend(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
|
||||
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
|
||||
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
|
||||
}
|
||||
|
||||
return amdgpu_uvd_sw_fini(adev);
|
||||
}
|
||||
|
||||
@ -149,7 +472,7 @@ static int uvd_v6_0_hw_init(void *handle)
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring = &adev->uvd.ring;
|
||||
uint32_t tmp;
|
||||
int r;
|
||||
int i, r;
|
||||
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
@ -189,9 +512,25 @@ static int uvd_v6_0_hw_init(void *handle)
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
|
||||
ring = &adev->uvd.ring_enc[i];
|
||||
ring->ready = true;
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
if (!r)
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
if (!r) {
|
||||
if (uvd_v6_0_enc_support(adev))
|
||||
DRM_INFO("UVD and UVD ENC initialized successfully.\n");
|
||||
else
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -514,6 +853,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
|
||||
|
||||
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
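/* Program both enc ring buffers: ring 0 uses the mmUVD_RB_* registers and
 * ring 1 the mmUVD_RB_*2 variants. */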
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||
WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||
WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||
WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||
|
||||
ring = &adev->uvd.ring_enc[1];
|
||||
WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||
WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||
WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -576,6 +931,26 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
|
||||
amdgpu_ring_write(ring, 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
* @fence: fence to emit
|
||||
*
|
||||
* Write an enc fence and a trap command to the ring.
|
||||
*/
|
||||
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
|
||||
u64 seq, unsigned flags)
|
||||
{
|
||||
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
|
||||
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
|
||||
amdgpu_ring_write(ring, addr);
|
||||
amdgpu_ring_write(ring, upper_32_bits(addr));
|
||||
amdgpu_ring_write(ring, seq);
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
|
||||
*
|
||||
@ -667,6 +1042,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
|
||||
amdgpu_ring_write(ring, ib->length_dw);
|
||||
}
|
||||
|
||||
/**
|
||||
* uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
* @ib: indirect buffer to execute
|
||||
*
|
||||
* Write enc ring commands to execute the indirect buffer
|
||||
*/
|
||||
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
|
||||
struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
|
||||
{
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
|
||||
amdgpu_ring_write(ring, vm_id);
|
||||
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
|
||||
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
|
||||
amdgpu_ring_write(ring, ib->length_dw);
|
||||
}
|
||||
|
||||
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
||||
unsigned vm_id, uint64_t pd_addr)
|
||||
{
|
||||
@ -718,6 +1111,33 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
|
||||
amdgpu_ring_write(ring, 0xE);
|
||||
}
|
||||
|
||||
static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
|
||||
{
|
||||
uint32_t seq = ring->fence_drv.sync_seq;
|
||||
uint64_t addr = ring->fence_drv.gpu_addr;
|
||||
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
|
||||
amdgpu_ring_write(ring, lower_32_bits(addr));
|
||||
amdgpu_ring_write(ring, upper_32_bits(addr));
|
||||
amdgpu_ring_write(ring, seq);
|
||||
}
|
||||
|
||||
static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
|
||||
{
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
|
||||
}
|
||||
|
||||
static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
||||
unsigned int vm_id, uint64_t pd_addr)
|
||||
{
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
|
||||
amdgpu_ring_write(ring, vm_id);
|
||||
amdgpu_ring_write(ring, pd_addr >> 12);
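/* The page directory base is passed as a page frame number (address >> 12),
 * then the TLB for this vm_id is flushed. */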
|
||||
|
||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
|
||||
amdgpu_ring_write(ring, vm_id);
|
||||
}
|
||||
|
||||
static bool uvd_v6_0_is_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@ -825,8 +1245,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool int_handled = true;
|
||||
DRM_DEBUG("IH: UVD TRAP\n");
|
||||
amdgpu_fence_process(&adev->uvd.ring);
|
||||
|
||||
switch (entry->src_id) {
|
||||
case 124:
|
||||
amdgpu_fence_process(&adev->uvd.ring);
|
||||
break;
|
||||
case 119:
|
||||
if (likely(uvd_v6_0_enc_support(adev)))
|
||||
amdgpu_fence_process(&adev->uvd.ring_enc[0]);
|
||||
else
|
||||
int_handled = false;
|
||||
break;
|
||||
case 120:
|
||||
if (likely(uvd_v6_0_enc_support(adev)))
|
||||
amdgpu_fence_process(&adev->uvd.ring_enc[1]);
|
||||
else
|
||||
int_handled = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!int_handled)
|
||||
DRM_ERROR("Unhandled interrupt: %d %d\n",
|
||||
entry->src_id, entry->src_data[0]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1153,6 +1596,33 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
|
||||
.end_use = amdgpu_uvd_ring_end_use,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
|
||||
.type = AMDGPU_RING_TYPE_UVD_ENC,
|
||||
.align_mask = 0x3f,
|
||||
.nop = HEVC_ENC_CMD_NO_OP,
|
||||
.support_64bit_ptrs = false,
|
||||
.get_rptr = uvd_v6_0_enc_ring_get_rptr,
|
||||
.get_wptr = uvd_v6_0_enc_ring_get_wptr,
|
||||
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
|
||||
.emit_frame_size =
|
||||
4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
|
||||
6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
|
||||
5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
|
||||
1, /* uvd_v6_0_enc_ring_insert_end */
|
||||
.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
|
||||
.emit_ib = uvd_v6_0_enc_ring_emit_ib,
|
||||
.emit_fence = uvd_v6_0_enc_ring_emit_fence,
|
||||
.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
|
||||
.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
|
||||
.test_ring = uvd_v6_0_enc_ring_test_ring,
|
||||
.test_ib = uvd_v6_0_enc_ring_test_ib,
|
||||
.insert_nop = amdgpu_ring_insert_nop,
|
||||
.insert_end = uvd_v6_0_enc_ring_insert_end,
|
||||
.pad_ib = amdgpu_ring_generic_pad_ib,
|
||||
.begin_use = amdgpu_uvd_ring_begin_use,
|
||||
.end_use = amdgpu_uvd_ring_end_use,
|
||||
};
|
||||
|
||||
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->asic_type >= CHIP_POLARIS10) {
|
||||
@ -1164,6 +1634,16 @@ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
|
||||
adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
|
||||
|
||||
DRM_INFO("UVD ENC is enabled in VM mode\n");
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
|
||||
.set = uvd_v6_0_set_interrupt_state,
|
||||
.process = uvd_v6_0_process_interrupt,
|
||||
@ -1171,7 +1651,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
|
||||
|
||||
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
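/* One trap source for the decode ring plus one per enc ring. */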
|
||||
adev->uvd.irq.num_types = 1;
|
||||
if (uvd_v6_0_enc_support(adev))
|
||||
adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
|
||||
else
|
||||
adev->uvd.irq.num_types = 1;
|
||||
|
||||
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
|
||||
}
|
||||
|
||||
|
@ -260,15 +260,18 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Not a retry fault */
|
||||
if (!(dw5 & 0x80))
|
||||
return true;
|
||||
|
||||
pasid = dw3 & 0xffff;
|
||||
/* No PASID, can't identify faulting process */
|
||||
if (!pasid)
|
||||
return true;
|
||||
|
||||
/* Not a retry fault, check fault credit */
|
||||
if (!(dw5 & 0x80)) {
|
||||
if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
|
||||
goto ignore_iv;
|
||||
return true;
|
||||
}
|
||||
|
||||
addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
|
||||
key = AMDGPU_VM_FAULT(pasid, addr);
|
||||
r = amdgpu_ih_add_fault(adev, key);
|
||||
|
@ -1254,7 +1254,6 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
uint32_t msg_id, pp_state = 0;
|
||||
uint32_t pp_support_state = 0;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
void *pp_handle = adev->powerplay.pp_handle;
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
|
||||
@ -1271,7 +1270,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_MC,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
|
||||
@ -1289,7 +1289,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_SDMA,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
|
||||
@ -1307,7 +1308,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_HDP,
|
||||
pp_support_state,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
|
||||
@ -1321,7 +1323,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_BIF,
|
||||
PP_STATE_SUPPORT_LS,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
|
||||
if (state == AMD_CG_STATE_UNGATE)
|
||||
@ -1333,7 +1336,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_BIF,
|
||||
PP_STATE_SUPPORT_CG,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
|
||||
@ -1347,7 +1351,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_DRM,
|
||||
PP_STATE_SUPPORT_LS,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
|
||||
@ -1361,7 +1366,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
|
||||
PP_BLOCK_SYS_ROM,
|
||||
PP_STATE_SUPPORT_CG,
|
||||
pp_state);
|
||||
amd_set_clockgating_by_smu(pp_handle, msg_id);
|
||||
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
|
||||
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -465,6 +465,16 @@
#define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108

/* HEVC ENC */
#define HEVC_ENC_CMD_NO_OP 0x00000000
#define HEVC_ENC_CMD_END 0x00000001
#define HEVC_ENC_CMD_FENCE 0x00000003
#define HEVC_ENC_CMD_TRAP 0x00000004
#define HEVC_ENC_CMD_IB_VM 0x00000102
#define HEVC_ENC_CMD_WAIT_GE 0x00000106
#define HEVC_ENC_CMD_UPDATE_PTB 0x00000107
#define HEVC_ENC_CMD_FLUSH_TLB 0x00000108
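/* The HEVC (UVD ENC) ring appears to reuse the VCE-style command encoding;
 * note that UPDATE_PTB and FLUSH_TLB match the VCE_CMD_* values above. */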

/* mmPA_SC_RASTER_CONFIG mask */
#define RB_MAP_PKR0(x) ((x) << 0)
#define RB_MAP_PKR0_MASK (0x3 << 0)
@ -257,43 +257,54 @@ struct amd_ip_funcs {
|
||||
void (*get_clockgating_state)(void *handle, u32 *flags);
|
||||
};
|
||||
|
||||
enum amd_pp_task;
|
||||
|
||||
enum amd_pp_task;
|
||||
enum amd_pp_clock_type;
|
||||
struct pp_states_info;
|
||||
struct amd_pp_simple_clock_info;
|
||||
struct amd_pp_display_configuration;
|
||||
struct amd_pp_clock_info;
|
||||
struct pp_display_clock_request;
|
||||
struct pp_wm_sets_with_clock_ranges_soc15;
|
||||
struct pp_clock_levels_with_voltage;
|
||||
struct pp_clock_levels_with_latency;
|
||||
struct amd_pp_clocks;
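/*
 * amd_pm_funcs is reordered below so the callbacks are grouped by consumer:
 * dpm helpers for ci/si, sysfs entry points, hooks exported to amdgpu, and
 * the DC clock/display interface.
 */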
|
||||
|
||||
struct amd_pm_funcs {
|
||||
int (*get_temperature)(void *handle);
|
||||
/* export for dpm on ci and si */
|
||||
int (*pre_set_power_state)(void *handle);
|
||||
int (*set_power_state)(void *handle);
|
||||
void (*post_set_power_state)(void *handle);
|
||||
void (*display_configuration_changed)(void *handle);
|
||||
u32 (*get_sclk)(void *handle, bool low);
|
||||
u32 (*get_mclk)(void *handle, bool low);
|
||||
void (*print_power_state)(void *handle, void *ps);
|
||||
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
|
||||
int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
|
||||
bool (*vblank_too_short)(void *handle);
|
||||
void (*powergate_uvd)(void *handle, bool gate);
|
||||
void (*powergate_vce)(void *handle, bool gate);
|
||||
void (*enable_bapm)(void *handle, bool enable);
|
||||
int (*check_state_equal)(void *handle,
|
||||
void *cps,
|
||||
void *rps,
|
||||
bool *equal);
|
||||
/* export for sysfs */
|
||||
int (*get_temperature)(void *handle);
|
||||
void (*set_fan_control_mode)(void *handle, u32 mode);
|
||||
u32 (*get_fan_control_mode)(void *handle);
|
||||
int (*set_fan_speed_percent)(void *handle, u32 speed);
|
||||
int (*get_fan_speed_percent)(void *handle, u32 *speed);
|
||||
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
|
||||
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
|
||||
int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
|
||||
int (*get_sclk_od)(void *handle);
|
||||
int (*set_sclk_od)(void *handle, uint32_t value);
|
||||
int (*get_mclk_od)(void *handle);
|
||||
int (*set_mclk_od)(void *handle, uint32_t value);
|
||||
int (*check_state_equal)(void *handle,
|
||||
void *cps,
|
||||
void *rps,
|
||||
bool *equal);
|
||||
int (*read_sensor)(void *handle, int idx, void *value,
|
||||
int *size);
|
||||
int (*read_sensor)(void *handle, int idx, void *value, int *size);
|
||||
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
|
||||
enum amd_pm_state_type (*get_current_power_state)(void *handle);
|
||||
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
|
||||
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
|
||||
int (*get_pp_table)(void *handle, char **table);
|
||||
int (*set_pp_table)(void *handle, const char *buf, size_t size);
|
||||
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
|
||||
|
||||
struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
|
||||
int (*reset_power_profile_state)(void *handle,
|
||||
struct amd_pp_profile *request);
|
||||
int (*get_power_profile_state)(void *handle,
|
||||
@ -302,16 +313,39 @@ struct amd_pm_funcs {
|
||||
struct amd_pp_profile *request);
|
||||
int (*switch_power_profile)(void *handle,
|
||||
enum amd_pp_profile_type type);
|
||||
int (*load_firmware)(void *handle);
|
||||
int (*wait_for_fw_loading_complete)(void *handle);
|
||||
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
|
||||
enum amd_pm_state_type (*get_current_power_state)(void *handle);
|
||||
/* export to amdgpu */
|
||||
void (*powergate_uvd)(void *handle, bool gate);
|
||||
void (*powergate_vce)(void *handle, bool gate);
|
||||
struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
|
||||
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
|
||||
void *input, void *output);
|
||||
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
|
||||
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
|
||||
int (*get_pp_table)(void *handle, char **table);
|
||||
int (*set_pp_table)(void *handle, const char *buf, size_t size);
|
||||
int (*load_firmware)(void *handle);
|
||||
int (*wait_for_fw_loading_complete)(void *handle);
|
||||
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
|
||||
/* export to DC */
|
||||
u32 (*get_sclk)(void *handle, bool low);
|
||||
u32 (*get_mclk)(void *handle, bool low);
|
||||
int (*display_configuration_change)(void *handle,
|
||||
const struct amd_pp_display_configuration *input);
|
||||
int (*get_display_power_level)(void *handle,
|
||||
struct amd_pp_simple_clock_info *output);
|
||||
int (*get_current_clocks)(void *handle,
|
||||
struct amd_pp_clock_info *clocks);
|
||||
int (*get_clock_by_type)(void *handle,
|
||||
enum amd_pp_clock_type type,
|
||||
struct amd_pp_clocks *clocks);
|
||||
int (*get_clock_by_type_with_latency)(void *handle,
|
||||
enum amd_pp_clock_type type,
|
||||
struct pp_clock_levels_with_latency *clocks);
|
||||
int (*get_clock_by_type_with_voltage)(void *handle,
|
||||
enum amd_pp_clock_type type,
|
||||
struct pp_clock_levels_with_voltage *clocks);
|
||||
int (*set_watermarks_for_clocks_ranges)(void *handle,
|
||||
struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
|
||||
int (*display_clock_voltage_request)(void *handle,
|
||||
struct pp_display_clock_request *clock);
|
||||
int (*get_display_mode_validation_clocks)(void *handle,
|
||||
struct amd_pp_simple_clock_info *clocks);
|
||||
};
|
||||
|
||||
|
||||
|
@ -36,6 +36,16 @@
|
||||
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
|
||||
#define mmUVD_POWER_STATUS_U 0x3bfd
|
||||
#define mmUVD_NO_OP 0x3bff
|
||||
#define mmUVD_RB_BASE_LO2 0x3c21
|
||||
#define mmUVD_RB_BASE_HI2 0x3c22
|
||||
#define mmUVD_RB_SIZE2 0x3c23
|
||||
#define mmUVD_RB_RPTR2 0x3c24
|
||||
#define mmUVD_RB_WPTR2 0x3c25
|
||||
#define mmUVD_RB_BASE_LO 0x3c26
|
||||
#define mmUVD_RB_BASE_HI 0x3c27
|
||||
#define mmUVD_RB_SIZE 0x3c28
|
||||
#define mmUVD_RB_RPTR 0x3c29
|
||||
#define mmUVD_RB_WPTR 0x3c2a
|
||||
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
|
||||
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
|
||||
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
|
||||
@ -43,6 +53,11 @@
|
||||
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x3c5f
|
||||
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x3c5e
|
||||
#define mmUVD_SEMA_CNTL 0x3d00
|
||||
#define mmUVD_RB_WPTR3 0x3d1c
|
||||
#define mmUVD_RB_RPTR3 0x3d1b
|
||||
#define mmUVD_RB_BASE_LO3 0x3d1d
|
||||
#define mmUVD_RB_BASE_HI3 0x3d1e
|
||||
#define mmUVD_RB_SIZE3 0x3d1f
|
||||
#define mmUVD_LMI_EXT40_ADDR 0x3d26
|
||||
#define mmUVD_CTX_INDEX 0x3d28
|
||||
#define mmUVD_CTX_DATA 0x3d29
|
||||
|
@ -4292,6 +4292,7 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
#define ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION 0x2

/***********************************************************************************/
// Structure used in VRAM_UsageByFirmwareTable
@ -423,6 +423,10 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
|
||||
|
||||
typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
|
||||
|
||||
struct amd_pp_init;
|
||||
typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
|
||||
int (*call_back_func)(struct amd_pp_init *, void **));
|
||||
|
||||
struct cgs_ops {
|
||||
/* memory management calls (similar to KFD interface) */
|
||||
cgs_alloc_gpu_mem_t alloc_gpu_mem;
|
||||
@ -459,6 +463,7 @@ struct cgs_ops {
|
||||
cgs_is_virtualization_enabled_t is_virtualization_enabled;
|
||||
cgs_enter_safe_mode enter_safe_mode;
|
||||
cgs_lock_grbm_idx lock_grbm_idx;
|
||||
cgs_register_pp_handle register_pp_handle;
|
||||
};
|
||||
|
||||
struct cgs_os_ops; /* To be define in OS-specific CGS header */
|
||||
@ -537,4 +542,7 @@ struct cgs_device
|
||||
|
||||
#define cgs_lock_grbm_idx(cgs_device, lock) \
|
||||
CGS_CALL(lock_grbm_idx, cgs_device, lock)
|
||||
#define cgs_register_pp_handle(cgs_device, call_back_func) \
|
||||
CGS_CALL(register_pp_handle, cgs_device, call_back_func)
|
||||
|
||||
#endif /* _CGS_COMMON_H */
|
||||
|
@ -4,7 +4,8 @@ menu "AMD Library routines"
# Closed hash table
#
config CHASH
tristate "Closed hash table"
tristate
default DRM_AMDGPU
help
Statically sized closed hash table implementation with low
memory and CPU overhead.
@ -223,8 +223,8 @@ static int chash_table_check(struct __chash_table *table)
static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
{
BUG_ON(src.table == dst.table && src.slot == dst.slot);
BUG_ON(src.table->key_size != src.table->key_size);
BUG_ON(src.table->value_size != src.table->value_size);
BUG_ON(src.table->key_size != dst.table->key_size);
BUG_ON(src.table->value_size != dst.table->value_size);

if (dst.table->key_size == 4)
dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
|
@ -30,9 +30,14 @@
|
||||
#include "pp_instance.h"
|
||||
#include "power_state.h"
|
||||
|
||||
#define PP_DPM_DISABLED 0xCCCC
|
||||
|
||||
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
|
||||
void *input, void *output);
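/*
 * Note on pp_check(): as used by the callers below it returns 0 when
 * powerplay and dpm are fully initialized, PP_DPM_DISABLED when the instance
 * exists but dpm is disabled, and a negative errno for an invalid handle.
 */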
|
||||
|
||||
static inline int pp_check(struct pp_instance *handle)
|
||||
{
|
||||
if (handle == NULL || handle->pp_valid != PP_VALID)
|
||||
if (handle == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
|
||||
@ -47,19 +52,54 @@ static inline int pp_check(struct pp_instance *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_powerplay_create(struct amd_pp_init *pp_init,
|
||||
void **handle)
|
||||
{
|
||||
struct pp_instance *instance;
|
||||
|
||||
if (pp_init == NULL || handle == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
|
||||
if (instance == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
instance->chip_family = pp_init->chip_family;
|
||||
instance->chip_id = pp_init->chip_id;
|
||||
instance->pm_en = pp_init->pm_en;
|
||||
instance->feature_mask = pp_init->feature_mask;
|
||||
instance->device = pp_init->device;
|
||||
mutex_init(&instance->pp_lock);
|
||||
*handle = instance;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_powerplay_destroy(void *handle)
|
||||
{
|
||||
struct pp_instance *instance = (struct pp_instance *)handle;
|
||||
|
||||
kfree(instance->hwmgr);
|
||||
instance->hwmgr = NULL;
|
||||
|
||||
kfree(instance);
|
||||
instance = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_early_init(void *handle)
|
||||
{
|
||||
int ret;
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
struct pp_instance *pp_handle = NULL;
|
||||
|
||||
pp_handle = cgs_register_pp_handle(handle, amd_powerplay_create);
|
||||
|
||||
if (!pp_handle)
|
||||
return -EINVAL;
|
||||
|
||||
ret = hwmgr_early_init(pp_handle);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
if ((pp_handle->pm_en == 0)
|
||||
|| cgs_is_virtualization_enabled(pp_handle->device))
|
||||
return PP_DPM_DISABLED;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -71,7 +111,7 @@ static int pp_sw_init(void *handle)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret == 0 || ret == PP_DPM_DISABLED) {
|
||||
if (ret >= 0) {
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->smumgr_funcs->smu_init == NULL)
|
||||
@ -91,7 +131,7 @@ static int pp_sw_fini(void *handle)
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
if (ret == 0 || ret == PP_DPM_DISABLED) {
|
||||
if (ret >= 0) {
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->smumgr_funcs->smu_fini == NULL)
|
||||
@ -110,7 +150,7 @@ static int pp_hw_init(void *handle)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret == 0 || ret == PP_DPM_DISABLED) {
|
||||
if (ret >= 0) {
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->smumgr_funcs->start_smu == NULL)
|
||||
@ -122,16 +162,17 @@ static int pp_hw_init(void *handle)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (ret == PP_DPM_DISABLED)
|
||||
return PP_DPM_DISABLED;
|
||||
goto exit;
|
||||
ret = hwmgr_hw_init(pp_handle);
|
||||
if (ret)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = hwmgr_hw_init(pp_handle);
|
||||
if (ret)
|
||||
goto err;
|
||||
return 0;
|
||||
err:
|
||||
return ret;
|
||||
exit:
|
||||
pp_handle->pm_en = 0;
|
||||
return PP_DPM_DISABLED;
|
||||
cgs_notify_dpm_enabled(hwmgr->device, false);
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static int pp_hw_fini(void *handle)
|
||||
@ -146,6 +187,25 @@ static int pp_hw_fini(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_late_init(void *handle)
|
||||
{
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
int ret = 0;
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
if (ret == 0)
|
||||
pp_dpm_dispatch_tasks(pp_handle,
|
||||
AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pp_late_fini(void *handle)
|
||||
{
|
||||
amd_powerplay_destroy(handle);
|
||||
}
|
||||
|
||||
|
||||
static bool pp_is_idle(void *handle)
|
||||
{
|
||||
return false;
|
||||
@ -161,28 +221,6 @@ static int pp_sw_reset(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
|
||||
{
|
||||
struct pp_hwmgr *hwmgr;
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
int ret = 0;
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
|
||||
pr_info("%s was not implemented.\n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
|
||||
}
|
||||
|
||||
static int pp_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state)
|
||||
{
|
||||
@ -192,7 +230,7 @@ static int pp_set_powergating_state(void *handle,
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -213,39 +251,34 @@ static int pp_suspend(void *handle)
|
||||
int ret = 0;
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret == PP_DPM_DISABLED)
|
||||
return 0;
|
||||
else if (ret != 0)
|
||||
return ret;
|
||||
|
||||
return hwmgr_hw_suspend(pp_handle);
|
||||
if (ret == 0)
|
||||
hwmgr_hw_suspend(pp_handle);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_resume(void *handle)
|
||||
{
|
||||
struct pp_hwmgr *hwmgr;
|
||||
int ret, ret1;
|
||||
int ret;
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
|
||||
ret1 = pp_check(pp_handle);
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
|
||||
return ret1;
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->smumgr_funcs->start_smu == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
ret = hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr);
|
||||
if (ret) {
|
||||
if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
|
||||
pr_err("smc start failed\n");
|
||||
hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
|
||||
return ret;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ret1 == PP_DPM_DISABLED)
|
||||
if (ret == PP_DPM_DISABLED)
|
||||
return 0;
|
||||
|
||||
return hwmgr_hw_resume(pp_handle);
|
||||
@ -254,11 +287,12 @@ static int pp_resume(void *handle)
|
||||
const struct amd_ip_funcs pp_ip_funcs = {
|
||||
.name = "powerplay",
|
||||
.early_init = pp_early_init,
|
||||
.late_init = NULL,
|
||||
.late_init = pp_late_init,
|
||||
.sw_init = pp_sw_init,
|
||||
.sw_fini = pp_sw_fini,
|
||||
.hw_init = pp_hw_init,
|
||||
.hw_fini = pp_hw_fini,
|
||||
.late_fini = pp_late_fini,
|
||||
.suspend = pp_suspend,
|
||||
.resume = pp_resume,
|
||||
.is_idle = pp_is_idle,
|
||||
@ -278,6 +312,27 @@ static int pp_dpm_fw_loading_complete(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
|
||||
{
|
||||
struct pp_hwmgr *hwmgr;
|
||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||
int ret = 0;
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
|
||||
pr_info("%s was not implemented.\n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
|
||||
}
|
||||
|
||||
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
|
||||
enum amd_dpm_forced_level *level)
|
||||
{
|
||||
@ -323,7 +378,7 @@ static int pp_dpm_force_performance_level(void *handle,
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -358,7 +413,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -377,7 +432,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -401,7 +456,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -424,7 +479,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -446,7 +501,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -468,7 +523,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&pp_handle->pp_lock);
|
||||
@ -488,7 +543,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -530,7 +585,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -553,7 +608,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -576,7 +631,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
@ -599,7 +654,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
|
||||
|
||||
ret = pp_check(pp_handle);
|
||||
|
||||
if (ret != 0)
|
||||
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -623,7 +678,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -645,7 +700,7 @@ static int pp_dpm_get_temperature(void *handle)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -670,7 +725,7 @@ static int pp_dpm_get_pp_num_states(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -715,7 +770,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -738,7 +793,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -780,7 +835,7 @@ static int pp_dpm_force_clock_level(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -804,7 +859,7 @@ static int pp_dpm_print_clock_levels(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -827,7 +882,7 @@ static int pp_dpm_get_sclk_od(void *handle)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -850,7 +905,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -874,7 +929,7 @@ static int pp_dpm_get_mclk_od(void *handle)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -897,7 +952,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -921,7 +976,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -947,7 +1002,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return NULL;

hwmgr = pp_handle->hwmgr;
@ -1120,53 +1175,16 @@ const struct amd_pm_funcs pp_dpm_funcs = {
.get_power_profile_state = pp_dpm_get_power_profile_state,
.set_power_profile_state = pp_dpm_set_power_profile_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
};

int amd_powerplay_create(struct amd_pp_init *pp_init,
void **handle)
{
struct pp_instance *instance;

if (pp_init == NULL || handle == NULL)
return -EINVAL;

instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
if (instance == NULL)
return -ENOMEM;

instance->pp_valid = PP_VALID;
instance->chip_family = pp_init->chip_family;
instance->chip_id = pp_init->chip_id;
instance->pm_en = pp_init->pm_en;
instance->feature_mask = pp_init->feature_mask;
instance->device = pp_init->device;
mutex_init(&instance->pp_lock);
*handle = instance;
return 0;
}

int amd_powerplay_destroy(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;

kfree(instance->hwmgr);
instance->hwmgr = NULL;

kfree(instance);
instance = NULL;
return 0;
}

int amd_powerplay_reset(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
int ret;

if (cgs_is_virtualization_enabled(instance->hwmgr->device))
return PP_DPM_DISABLED;

ret = pp_check(instance);
if (ret != 0)
if (!ret)
return ret;

ret = pp_hw_fini(instance);
@ -1175,7 +1193,7 @@ int amd_powerplay_reset(void *handle)

ret = hwmgr_hw_init(instance);
if (ret)
return PP_DPM_DISABLED;
return ret;

return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
}
@ -1191,7 +1209,7 @@ int amd_powerplay_display_configuration_change(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -1210,7 +1228,7 @@ int amd_powerplay_get_display_power_level(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -1235,7 +1253,7 @@ int amd_powerplay_get_current_clocks(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -1252,7 +1270,7 @@ int amd_powerplay_get_current_clocks(void *handle,
ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

if (ret != 0) {
if (ret) {
pr_info("Error in phm_get_clock_info \n");
mutex_unlock(&pp_handle->pp_lock);
return -EINVAL;
@ -1286,7 +1304,7 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -1309,7 +1327,7 @@ int amd_powerplay_get_clock_by_type_with_latency(void *handle,
int ret = 0;

ret = pp_check(pp_handle);
if (ret != 0)
if (ret)
return ret;

if (!clocks)
@ -1331,7 +1349,7 @@ int amd_powerplay_get_clock_by_type_with_voltage(void *handle,
int ret = 0;

ret = pp_check(pp_handle);
if (ret != 0)
if (ret)
return ret;

if (!clocks)
@ -1355,7 +1373,7 @@ int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle,
int ret = 0;

ret = pp_check(pp_handle);
if (ret != 0)
if (ret)
return ret;

if (!wm_with_clock_ranges)
@ -1379,7 +1397,7 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int ret = 0;

ret = pp_check(pp_handle);
if (ret != 0)
if (ret)
return ret;

if (!clock)
@ -1403,7 +1421,7 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,

ret = pp_check(pp_handle);

if (ret != 0)
if (ret)
return ret;

hwmgr = pp_handle->hwmgr;
@ -704,7 +704,7 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)

clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
pr_info("min_core_set_clock not set\n");
pr_debug("min_core_set_clock not set\n");

if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;

@ -292,7 +292,6 @@ int hwmgr_hw_fini(struct pp_instance *handle)

phm_stop_thermal_controller(hwmgr);
psm_set_boot_states(hwmgr);
phm_display_configuration_changed(hwmgr);
psm_adjust_power_state_dynamic(hwmgr, false, NULL);
phm_disable_dynamic_state_management(hwmgr);
phm_disable_clock_power_gatings(hwmgr);

File diff suppressed because it is too large
@ -28,7 +28,7 @@

#include <linux/kernel.h>

struct phm_fuses_default {
const char *key;
uint64_t key;
uint32_t VFT2_m1;
uint32_t VFT2_m2;
uint32_t VFT2_b;
@ -40,9 +40,7 @@ struct phm_fuses_default {
uint32_t VFT0_b;
};

extern struct phm_fuses_default vega10_fuses_default[];
extern int pp_override_get_default_fuse_value(uint64_t key,
struct phm_fuses_default list[],
struct phm_fuses_default *result);

#endif
@ -224,6 +224,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;

phm_display_configuration_changed(hwmgr);

if (new_ps != NULL)
requested = new_ps;
else
@ -232,7 +234,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
pcurrent = hwmgr->current_ps;

phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);

if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
&pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
@ -241,6 +242,9 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}

phm_notify_smc_display_config_after_ps_adjustment(hwmgr);

return 0;
}
@ -790,7 +790,7 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
return pstate;
}

static unsigned char soft_dummy_pp_table[] = {
static const unsigned char soft_dummy_pp_table[] = {
0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00,
0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00,
@ -312,37 +312,37 @@ static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}

/* temporary hardcoded clock voltage breakdown tables */
DpmClock_t VddDcfClk[]= {
static const DpmClock_t VddDcfClk[]= {
{ 300, 2600},
{ 600, 3200},
{ 600, 3600},
};

DpmClock_t VddSocClk[]= {
static const DpmClock_t VddSocClk[]= {
{ 478, 2600},
{ 722, 3200},
{ 722, 3600},
};

DpmClock_t VddFClk[]= {
static const DpmClock_t VddFClk[]= {
{ 400, 2600},
{1200, 3200},
{1200, 3600},
};

DpmClock_t VddDispClk[]= {
static const DpmClock_t VddDispClk[]= {
{ 435, 2600},
{ 661, 3200},
{1086, 3600},
};

DpmClock_t VddDppClk[]= {
static const DpmClock_t VddDppClk[]= {
{ 435, 2600},
{ 661, 3200},
{ 661, 3600},
};

DpmClock_t VddPhyClk[]= {
static const DpmClock_t VddPhyClk[]= {
{ 540, 2600},
{ 810, 3200},
{ 810, 3600},
@ -350,7 +350,7 @@ DpmClock_t VddPhyClk[]= {

static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct rv_voltage_dependency_table **pptable,
uint32_t num_entry, DpmClock_t *pclk_dependency_table)
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t table_size, i;
struct rv_voltage_dependency_table *ptable;
@ -421,6 +421,26 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetMinGfxclkFrequency),
"Attempt to get min GFXCLK Failed!",
return -1);
PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&result),
"Attempt to get min GFXCLK Failed!",
return -1);
rv_data->gfx_min_freq_limit = result * 100;

PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetMaxGfxclkFrequency),
"Attempt to get max GFXCLK Failed!",
return -1);
PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&result),
"Attempt to get max GFXCLK Failed!",
return -1);
rv_data->gfx_max_freq_limit = result * 100;

return 0;
}
@ -599,7 +619,53 @@ static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
return 0;
struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
struct rv_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
int i, now, size = 0;

switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetGfxclkFrequency),
"Attempt to get current GFXCLK Failed!",
return -1);
PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&now),
"Attempt to get current GFXCLK Failed!",
return -1);

size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit / 100,
((data->gfx_min_freq_limit / 100)
== now) ? "*" : "");
size += sprintf(buf + size, "1: %uMhz %s\n",
data->gfx_max_freq_limit / 100,
((data->gfx_max_freq_limit / 100)
== now) ? "*" : "");
break;
case PP_MCLK:
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetFclkFrequency),
"Attempt to get current MEMCLK Failed!",
return -1);
PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&now),
"Attempt to get current MEMCLK Failed!",
return -1);

for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
== now) ? "*" : "");
break;
default:
break;
}

return size;
}
static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
@ -830,13 +896,37 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
uint32_t sclk, mclk;
int ret = 0;

switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
if (!ret) {
rv_read_arg_from_smc(hwmgr, &sclk);
/* in units of 10KHZ */
*((uint32_t *)value) = sclk * 100;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
if (!ret) {
rv_read_arg_from_smc(hwmgr, &mclk);
/* in units of 10KHZ */
*((uint32_t *)value) = mclk * 100;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
return 0;
break;
default:
return -EINVAL;
ret = -EINVAL;
break;
}

return ret;
}

static const struct pp_hwmgr_func rv_hwmgr_funcs = {
@ -283,6 +283,8 @@ struct rv_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit;

bool vcn_power_gated;
bool vcn_dpg_mode;
@ -3825,14 +3825,11 @@ static int smu7_notify_link_speed_change_after_state_change(
static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int ret = 0;

if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
smum_send_msg_to_smc_with_parameter(hwmgr,
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
ret = (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
return ret;
return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}

static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@ -56,7 +56,7 @@

#define HBM_MEMORY_CHANNEL_WIDTH 128

uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
@ -81,7 +81,7 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);

const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);

struct vega10_power_state *cast_phw_vega10_power_state(
struct pp_hw_power_state *hw_ps)
@ -2364,7 +2364,7 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
} else {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_AVFS].smu_feature_id),
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Disable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = false;
@ -2393,7 +2393,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)

serial_number = ((uint64_t)bottom32 << 32) | top32;

if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
avfs_fuse_table->VFT0_b = fuse.VFT0_b;
avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;

@ -31,7 +31,6 @@
#include "vega10_ppsmc.h"
#include "vega10_powertune.h"

extern const uint32_t PhwVega10_Magic;
#define VEGA10_MAX_HARDWARE_POWERLEVELS 2

#define WaterMarksExist 1

@ -1243,8 +1243,8 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
}

if (0 == result) {
PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
"[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
result = vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = true;
}
}
@ -1290,8 +1290,8 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
}

if (0 == result) {
PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
"[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
result = vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = false;
}
}

@ -33,8 +33,6 @@
extern const struct amd_ip_funcs pp_ip_funcs;
extern const struct amd_pm_funcs pp_dpm_funcs;

#define PP_DPM_DISABLED 0xCCCC

enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
AMDGPU_PP_SENSOR_VDDNB,
@ -268,16 +266,12 @@ struct pp_display_clock_request {
state << PP_STATE_SHIFT)

struct amd_powerplay {
struct cgs_device *cgs_device;
void *pp_handle;
const struct amd_ip_funcs *ip_funcs;
const struct amd_pm_funcs *pp_funcs;
};

int amd_powerplay_create(struct amd_pp_init *pp_init,
void **handle);

int amd_powerplay_destroy(void *handle);

int amd_powerplay_reset(void *handle);

int amd_powerplay_display_configuration_change(void *handle,
@ -310,6 +304,5 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output);

int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);

#endif /* _AMD_POWERPLAY_H_ */

File diff suppressed because it is too large
File diff suppressed because it is too large
@ -25,10 +25,7 @@

#include "hwmgr.h"

#define PP_VALID 0x1F1F1F1F

struct pp_instance {
uint32_t pp_valid;
uint32_t chip_family;
uint32_t chip_id;
bool pm_en;

@ -70,7 +70,12 @@
#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
#define PPSMC_MSG_SetSoftMinVcn 0x28
#define PPSMC_Message_Count 0x29
#define PPSMC_MSG_GetGfxclkFrequency 0x2A
#define PPSMC_MSG_GetFclkFrequency 0x2B
#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
#define PPSMC_MSG_SoftReset 0x2E
#define PPSMC_Message_Count 0x2F


typedef uint16_t PPSMC_Result;
@ -4,7 +4,7 @@

SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o ci_smumgr.o
smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o

AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
@ -28,7 +28,6 @@

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smc.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
@ -208,7 +207,7 @@ static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
return 0;
}

int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;

@ -227,7 +226,7 @@ int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
return 0;
}

int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
@ -476,7 +475,7 @@ static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
return result;
}

int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
@ -1297,7 +1296,7 @@ static int ci_populate_single_memory_level(
return result;
}

int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
@ -1944,7 +1943,7 @@ static int ci_start_smc(struct pp_hwmgr *hwmgr)
return 0;
}

int ci_init_smc_table(struct pp_hwmgr *hwmgr)
static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@ -2125,7 +2124,7 @@ int ci_init_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}

int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
@ -2211,7 +2210,7 @@ static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
return 0;
}

int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
@ -2252,7 +2251,7 @@ int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
return result;
}

uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
switch (type) {
case SMU_SoftRegisters:
@ -2278,7 +2277,7 @@ uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
return 0;
}

uint32_t ci_get_mac_definition(uint32_t value)
static uint32_t ci_get_mac_definition(uint32_t value)
{
switch (value) {
case SMU_MAX_LEVELS_GRAPHICS:
@ -2332,7 +2331,7 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);

if (0 != byte_count) {
pr_err("SMC size must be dividable by 4\n");
pr_err("SMC size must be divisible by 4\n");
return -EINVAL;
}

@ -2355,7 +2354,7 @@ static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
return ci_load_smc_ucode(hwmgr);
}

int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
@ -2667,7 +2666,7 @@ static int ci_set_valid_flag(struct ci_mc_reg_table *table)
return 0;
}

int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
@ -2722,13 +2721,13 @@ int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
return result;
}

bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
return ci_is_smc_ram_running(hwmgr);
}

int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)
(hwmgr->smu_backend);
@ -2751,3 +2750,59 @@ int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
array_size, SMC_RAM_END);
}


static int ci_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct ci_smumgr *ci_priv = NULL;

ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);

if (ci_priv == NULL)
return -ENOMEM;

for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
ci_priv->activity_target[i] = 30;

hwmgr->smu_backend = ci_priv;

return 0;
}

static int ci_smu_fini(struct pp_hwmgr *hwmgr)
{
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}

static int ci_start_smu(struct pp_hwmgr *hwmgr)
{
return 0;
}

const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
.start_smu = ci_start_smu,
.check_fw_load_finish = NULL,
.request_smu_load_fw = NULL,
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = ci_send_msg_to_smc,
.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = ci_get_offsetof,
.process_firmware_header = ci_process_firmware_header,
.init_smc_table = ci_init_smc_table,
.update_sclk_threshold = ci_update_sclk_threshold,
.thermal_setup_fan_table = ci_thermal_setup_fan_table,
.populate_all_graphic_levels = ci_populate_all_graphic_levels,
.populate_all_memory_levels = ci_populate_all_memory_levels,
.get_mac_definition = ci_get_mac_definition,
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
.populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
};
@ -1,52 +0,0 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef CI_SMC_H
#define CI_SMC_H

#include <linux/types.h>


struct pp_smumgr;
struct pp_hwmgr;
struct amd_pp_profile;

int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
int ci_init_smc_table(struct pp_hwmgr *hwmgr);
int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr);
uint32_t ci_get_offsetof(uint32_t type, uint32_t member);
uint32_t ci_get_mac_definition(uint32_t value);
int ci_process_firmware_header(struct pp_hwmgr *hwmgr);
int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool ci_is_dpm_running(struct pp_hwmgr *hwmgr);
int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);


#endif
@ -1,86 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "linux/delay.h"

#include "smumgr.h"
#include "ci_smumgr.h"
#include "cgs_common.h"
#include "ci_smc.h"

static int ci_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct ci_smumgr *ci_priv = NULL;

ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);

if (ci_priv == NULL)
return -ENOMEM;

for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
ci_priv->activity_target[i] = 30;

hwmgr->smu_backend = ci_priv;

return 0;
}

static int ci_smu_fini(struct pp_hwmgr *hwmgr)
{
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}

static int ci_start_smu(struct pp_hwmgr *hwmgr)
{
return 0;
}

const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
.start_smu = ci_start_smu,
.check_fw_load_finish = NULL,
.request_smu_load_fw = NULL,
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = ci_send_msg_to_smc,
.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = ci_get_offsetof,
.process_firmware_header = ci_process_firmware_header,
.init_smc_table = ci_init_smc_table,
.update_sclk_threshold = ci_update_sclk_threshold,
.thermal_setup_fan_table = ci_thermal_setup_fan_table,
.populate_all_graphic_levels = ci_populate_all_graphic_levels,
.populate_all_memory_levels = ci_populate_all_memory_levels,
.get_mac_definition = ci_get_mac_definition,
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
.populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
};
@ -159,37 +159,44 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
return result;
}

static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr)
static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
{
int i;
int result = -EINVAL;
uint32_t reg, data;

const PWR_Command_Table *pvirus = PwrVirusTable;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
case PwrCmdWrite:
reg = pvirus->reg;
data = pvirus->data;
for (i = 0; i < size; i++) {
reg = pvirus->reg;
data = pvirus->data;
if (reg != 0xffffffff)
cgs_write_register(hwmgr->device, reg, data);
else
break;

case PwrCmdEnd:
result = 0;
break;

default:
pr_info("Table Exit with Invalid Command!");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
result = -EINVAL;
break;
}
pvirus++;
}
}

return result;
static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
{
int i;
cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
for (i = 0; i < section->dfy_size; i++)
cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}

static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
execute_pwr_table(hwmgr, PwrVirusTable_pre, ARRAY_SIZE(PwrVirusTable_pre));
execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
execute_pwr_table(hwmgr, PwrVirusTable_post, ARRAY_SIZE(PwrVirusTable_post));

return 0;
}

static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
@ -108,7 +108,7 @@ static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr,

PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);

PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);

return 0;
}
@ -60,37 +60,44 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};

static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr)
static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
{
int i;
int result = -EINVAL;
uint32_t reg, data;

const PWR_Command_Table *pvirus = pwr_virus_table;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
case PwrCmdWrite:
reg = pvirus->reg;
data = pvirus->data;
for (i = 0; i < size; i++) {
reg = pvirus->reg;
data = pvirus->data;
if (reg != 0xffffffff)
cgs_write_register(hwmgr->device, reg, data);
else
break;

case PwrCmdEnd:
result = 0;
break;

default:
pr_info("Table Exit with Invalid Command!");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
result = -EINVAL;
break;
}
pvirus++;
}
}

return result;
static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
{
int i;
cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
for (i = 0; i < section->dfy_size; i++)
cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}

static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));

return 0;
}

static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
@ -513,7 +513,7 @@ static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length

PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);

return 0;
}
@ -227,8 +227,14 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
*/
kthread_park(sched->thread);
kthread_unpark(sched->thread);
while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
struct amd_sched_fence *s_fence = job->s_fence;
amd_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
amd_sched_fence_finished(s_fence);
dma_fence_put(&s_fence->finished);
sched->ops->free_job(job);
}

}
kfifo_free(&entity->job_queue);
@ -308,7 +314,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
struct amd_sched_job *sched_job;
@ -354,8 +360,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}

/* job_finish is called after hw fence signaled, and
* the job had already been deleted from ring_mirror_list
/* job_finish is called after hw fence signaled
*/
static void amd_sched_job_finish(struct work_struct *work)
{
@ -396,6 +401,9 @@ static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;

dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
amd_sched_job_finish_cb);

spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
@ -488,8 +496,6 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;

trace_amd_sched_job(sched_job);
dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@ -600,7 +606,7 @@ static int amd_sched_main(void *param)
if (!entity)
continue;

sched_job = amd_sched_entity_pop_job(entity);
sched_job = amd_sched_entity_peek_job(entity);
if (!sched_job)
continue;

@ -611,6 +617,10 @@ static int amd_sched_main(void *param)

fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);

/* amd_sched_process_job drops the job's reference of the fence. */
sched_job->s_fence = NULL;

if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_syncobj_free);

static int drm_syncobj_create(struct drm_file *file_private,
u32 *handle, uint32_t flags)
/**
* drm_syncobj_create - create a new syncobj
* @out_syncobj: returned syncobj
* @flags: DRM_SYNCOBJ_* flags
* @fence: if non-NULL, the syncobj will represent this fence
*/
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
int ret;
struct drm_syncobj *syncobj;
@ -284,6 +290,25 @@ static int drm_syncobj_create(struct drm_file *file_private,
}
}

if (fence)
drm_syncobj_replace_fence(syncobj, fence);

*out_syncobj = syncobj;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
* drm_syncobj_get_handle - get a handle from a syncobj
*/
int drm_syncobj_get_handle(struct drm_file *file_private,
struct drm_syncobj *syncobj, u32 *handle)
{
int ret;

/* take a reference to put in the idr */
drm_syncobj_get(syncobj);

idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
@ -299,6 +324,22 @@ static int drm_syncobj_create(struct drm_file *file_private,
*handle = ret;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;

ret = drm_syncobj_create(&syncobj, flags, NULL);
if (ret)
return ret;

ret = drm_syncobj_get_handle(file_private, syncobj, handle);
drm_syncobj_put(syncobj);
return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
u32 handle)
@ -345,33 +386,38 @@ static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
return 0;
}

int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
int ret;
int fd;

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;

if (!syncobj->file) {
ret = drm_syncobj_alloc_file(syncobj);
if (ret) {
put_unused_fd(fd);
return ret;
}
}
fd_install(fd, syncobj->file);
*p_fd = fd;
return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
u32 handle, int *p_fd)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret;
int fd;

if (!syncobj)
return -EINVAL;

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
drm_syncobj_put(syncobj);
return fd;
}

if (!syncobj->file) {
ret = drm_syncobj_alloc_file(syncobj);
if (ret)
goto out_put_fd;
}
fd_install(fd, syncobj->file);
drm_syncobj_put(syncobj);
*p_fd = fd;
return 0;
out_put_fd:
put_unused_fd(fd);
ret = drm_syncobj_get_fd(syncobj, p_fd);
drm_syncobj_put(syncobj);
return ret;
}
@ -522,8 +568,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;

return drm_syncobj_create(file_private,
&args->handle, args->flags);
return drm_syncobj_create_as_handle(file_private,
&args->handle, args->flags);
}

int
@ -304,10 +304,10 @@ static int convert_bpc_to_bpp(int bpc)

/***** radeon specific DP functions *****/

int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate)
static int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
@ -184,6 +184,7 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter);

@ -1651,6 +1652,27 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
}
#endif

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
u32 tmp;
int i;

if (!ci_is_smc_running(rdev))
return PPSMC_Result_Failed;

WREG32(SMC_MESSAGE_0, msg);

for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(SMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
tmp = RREG32(SMC_RESP_0);

return (PPSMC_Result)tmp;
}

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter)
{

@ -330,7 +330,6 @@ int ci_program_jump_on_start(struct radeon_device *rdev);
void ci_stop_smc_clock(struct radeon_device *rdev);
void ci_start_smc_clock(struct radeon_device *rdev);
bool ci_is_smc_running(struct radeon_device *rdev);
PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
int ci_read_smc_sram_dword(struct radeon_device *rdev,

@ -163,27 +163,6 @@ bool ci_is_smc_running(struct radeon_device *rdev)
return false;
}

PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
u32 tmp;
int i;

if (!ci_is_smc_running(rdev))
return PPSMC_Result_Failed;

WREG32(SMC_MESSAGE_0, msg);

for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(SMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
tmp = RREG32(SMC_RESP_0);

return (PPSMC_Result)tmp;
}

#if 0
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
{
@ -352,7 +352,7 @@ static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
*/
static DEFINE_IDA(pasid_ida);

int alloc_pasid(unsigned int bits)
static int alloc_pasid(unsigned int bits)
{
int pasid = -EINVAL;

@ -367,7 +367,7 @@ int alloc_pasid(unsigned int bits)
return pasid;
}

void free_pasid(unsigned int pasid)
static void free_pasid(unsigned int pasid)
{
ida_simple_remove(&pasid_ida, pasid);
}
@ -762,10 +762,6 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 *dpcd,
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
@ -546,8 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible)
struct page *page, uint64_t size)
{

struct ttm_mem_zone *zone = NULL;
@ -564,11 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
interruptible);
return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
uint64_t size)
{
struct ttm_mem_zone *zone = NULL;

@ -579,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
ttm_mem_global_free_zone(glob, zone, size);
}


size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
@ -883,7 +883,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
}

ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
false, false);
PAGE_SIZE);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
@ -910,7 +910,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i]) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
ttm->pages[i], PAGE_SIZE);
ttm_put_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
@ -60,21 +60,25 @@
|
||||
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
|
||||
#define SMALL_ALLOCATION 4
|
||||
#define FREE_ALL_PAGES (~0U)
|
||||
#define VADDR_FLAG_HUGE_POOL 1UL
|
||||
|
||||
enum pool_type {
|
||||
IS_UNDEFINED = 0,
|
||||
IS_WC = 1 << 1,
|
||||
IS_UC = 1 << 2,
|
||||
IS_CACHED = 1 << 3,
|
||||
IS_DMA32 = 1 << 4
|
||||
IS_DMA32 = 1 << 4,
|
||||
IS_HUGE = 1 << 5
|
||||
};
|
||||
|
||||
/*
|
||||
* The pool structure. There are usually six pools:
|
||||
* The pool structure. There are up to nine pools:
|
||||
* - generic (not restricted to DMA32):
|
||||
* - write combined, uncached, cached.
|
||||
* - dma32 (up to 2^32 - so up 4GB):
|
||||
* - write combined, uncached, cached.
|
||||
* - huge (not restricted to DMA32):
|
||||
* - write combined, uncached, cached.
|
||||
* for each 'struct device'. The 'cached' is for pages that are actively used.
|
||||
* The other ones can be shrunk by the shrinker API if neccessary.
|
||||
* @pools: The 'struct device->dma_pools' link.
|
||||
@ -114,13 +118,14 @@ struct dma_pool {
|
||||
* The accounting page keeping track of the allocated page along with
|
||||
* the DMA address.
|
||||
* @page_list: The link to the 'page_list' in 'struct dma_pool'.
|
||||
* @vaddr: The virtual address of the page
|
||||
* @vaddr: The virtual address of the page and a flag if the page belongs to a
|
||||
* huge pool
|
||||
* @dma: The bus address of the page. If the page is not allocated
|
||||
* via the DMA API, it will be -1.
|
||||
*/
|
||||
struct dma_page {
|
||||
struct list_head page_list;
|
||||
void *vaddr;
|
||||
unsigned long vaddr;
|
||||
struct page *p;
|
||||
dma_addr_t dma;
|
||||
};
|
||||
@ -319,7 +324,8 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
|
||||
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
|
||||
{
|
||||
dma_addr_t dma = d_page->dma;
|
||||
dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
|
||||
d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
|
||||
dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
|
||||
|
||||
kfree(d_page);
|
||||
d_page = NULL;
|
||||
@ -327,19 +333,22 @@ static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
|
||||
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
|
||||
{
|
||||
struct dma_page *d_page;
|
||||
void *vaddr;
|
||||
|
||||
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
|
||||
if (!d_page)
|
||||
return NULL;
|
||||
|
||||
d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
|
||||
&d_page->dma,
|
||||
pool->gfp_flags);
|
||||
if (d_page->vaddr) {
|
||||
if (is_vmalloc_addr(d_page->vaddr))
|
||||
d_page->p = vmalloc_to_page(d_page->vaddr);
|
||||
vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
|
||||
pool->gfp_flags);
|
||||
if (vaddr) {
|
||||
if (is_vmalloc_addr(vaddr))
|
||||
d_page->p = vmalloc_to_page(vaddr);
|
||||
else
|
||||
d_page->p = virt_to_page(d_page->vaddr);
|
||||
d_page->p = virt_to_page(vaddr);
|
||||
d_page->vaddr = (unsigned long)vaddr;
|
||||
if (pool->type & IS_HUGE)
|
||||
d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
|
||||
} else {
|
||||
kfree(d_page);
|
||||
d_page = NULL;
|
||||
@ -371,11 +380,40 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
|
||||
}
|
||||
|
||||
/* set memory back to wb and free the pages. */
|
||||
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
|
||||
{
|
||||
struct page *page = d_page->p;
|
||||
unsigned i, num_pages;
|
||||
int ret;
|
||||
|
||||
/* Don't set WB on WB page pool. */
|
||||
if (!(pool->type & IS_CACHED)) {
|
||||
num_pages = pool->size / PAGE_SIZE;
|
||||
for (i = 0; i < num_pages; ++i, ++page) {
|
||||
ret = set_pages_array_wb(&page, 1);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to set %d pages to wb!\n",
|
||||
pool->dev_name, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
list_del(&d_page->page_list);
|
||||
__ttm_dma_free_page(pool, d_page);
|
||||
}
|
||||
|
||||
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
|
||||
struct page *pages[], unsigned npages)
|
||||
{
|
||||
struct dma_page *d_page, *tmp;
|
||||
|
||||
if (pool->type & IS_HUGE) {
|
||||
list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
|
||||
ttm_dma_page_put(pool, d_page);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* Don't set WB on WB page pool. */
|
||||
if (npages && !(pool->type & IS_CACHED) &&
|
||||
set_pages_array_wb(pages, npages))
|
||||
@ -388,17 +426,6 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
|
||||
}
|
||||
}
|
||||
|
||||
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
|
||||
{
|
||||
/* Don't set WB on WB page pool. */
|
||||
if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
|
||||
pr_err("%s: Failed to set %d pages to wb!\n",
|
||||
pool->dev_name, 1);
|
||||
|
||||
list_del(&d_page->page_list);
|
||||
__ttm_dma_free_page(pool, d_page);
|
||||
}
|
||||
|
||||
/*
|
||||
* Free pages from pool.
|
||||
*
|
||||
@ -567,8 +594,8 @@ static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
@ -605,11 +632,18 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
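
Replacing the hard-coded 5 with ARRAY_SIZE(t) keeps the name-building loop in sync with the table now that the "unknown"/IS_UNDEFINED slot has become "huge"/IS_HUGE. An inferred example of what the loop produces (not output quoted from the patch):

/* Inferred example of the name assembly above: type = IS_WC | IS_DMA32 |
 * IS_HUGE matches the n[] entries "wc", " dma32" and "huge" in table order,
 * so pool->name ends up as "wc dma32huge".
 */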
@ -713,7 +747,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@ -751,28 +785,32 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * we should be fine in such case
		 */
		if (!PageHighMem(p))
		if (PageHighMem(p))
			continue;
#endif
		{
			caching_array[cpages++] = p;

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
						cpages);
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
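
The caching transition now has to cover every CPU page of an allocation, hence the inner loop over pool->size / PAGE_SIZE subpages; p + j is valid because the DMA allocation is physically contiguous, so its struct pages are consecutive. A reduced, illustrative version of the batching logic (error handling trimmed, not the function itself):

/* Sketch: queue consecutive subpages and flush each full batch through
 * ttm_set_pages_caching(), the same batching the hunk above performs.
 */
static int sketch_change_caching(struct dma_pool *pool, struct page *first,
				 unsigned npages, struct page **batch,
				 unsigned batch_size)
{
	unsigned j, cpages = 0;
	int r;

	for (j = 0; j < npages; ++j) {
		batch[cpages++] = first + j;	/* consecutive struct pages */
		if (cpages == batch_size) {
			r = ttm_set_pages_caching(pool, batch, cpages);
			if (r)
				return r;
			cpages = 0;
		}
	}
	return cpages ? ttm_set_pages_caching(pool, batch, cpages) : 0;
}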
@ -860,6 +898,26 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
	return r;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE;
		gfp_flags &= ~__GFP_MOVABLE;
	}

	return gfp_flags;
}

/*
 * On success pages list will hold count number of correctly
 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
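
Factoring the gfp selection into ttm_dma_pool_gfp_flags() lets the small-page and huge paths share it: the huge case adds GFP_TRANSHUGE so the allocator tries for contiguous memory, and clears __GFP_MOVABLE because the buffer is pinned for DMA and must not be migrated. For a non-DMA32, non-zeroed TTM the two results would be (illustrative, using the kernel's standard GFP macros):

/* Illustrative only, not code from the patch. */
gfp_t small_flags = GFP_HIGHUSER;
gfp_t huge_flags  = (GFP_HIGHUSER | GFP_TRANSHUGE) & ~__GFP_MOVABLE;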
@ -868,6 +926,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
@ -876,26 +935,61 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	if (ttm->state != tt_unpopulated)
		return 0;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags;
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags = GFP_USER | GFP_DMA32;
		else
			gfp_flags = GFP_HIGHUSER;
		if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {
			return -ENOMEM;
		}
		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
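
When the huge pool satisfies a request, ttm_dma_pool_get_pages() only fills slot i; the loop above derives the remaining HPAGE_PMD_NR - 1 entries by stepping the struct page pointer and the DMA address, which works because the chunk is physically contiguous. The same fan-out as a standalone sketch (illustrative, not the function itself):

/* Sketch: fan one contiguous huge chunk out into per-page bookkeeping. */
static void sketch_fill_huge_entry(struct page **pages, dma_addr_t *dma,
				   unsigned long i)
{
	unsigned long j;

	/* slot i was filled by the pool; derive the rest of the chunk */
	for (j = i + 1; j < i + HPAGE_PMD_NR; ++j) {
		pages[j] = pages[j - 1] + 1;		/* next struct page */
		dma[j] = dma[j - 1] + PAGE_SIZE;	/* next 4 KiB of the chunk */
	}
}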
@ -903,11 +997,14 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
						pool->size);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
@ -931,10 +1028,33 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p, pool->size);
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;
@ -943,6 +1063,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
			ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
@ -968,13 +1089,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
						 d_page->p, pool->size);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
						 ttm->pages[i], pool->size);
		}
	}
@ -136,5 +136,10 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence);
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle);
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd);

#endif
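
The three new helpers let a driver wrap an existing dma_fence in a syncobj and hand a handle back to userspace, which is what the amdgpu FENCE_TO_HANDLE path added below builds on. A rough sketch of how they chain together (error handling trimmed; drm_syncobj_put() is assumed to be the usual refcount helper from this header, and this is not the actual amdgpu implementation):

/* Sketch: turn a dma_fence into a syncobj handle for 'file_private'. */
static int fence_to_syncobj_handle(struct drm_file *file_private,
				   struct dma_fence *fence, u32 *handle)
{
	struct drm_syncobj *syncobj;
	int ret;

	ret = drm_syncobj_create(&syncobj, 0, fence);	/* syncobj takes a fence ref */
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);	/* the installed handle keeps its own reference */
	return ret;
}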
@ -150,10 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
				uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
				     struct page *page,
				     bool no_wait, bool interruptible);
				     struct page *page, uint64_t size);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
				     struct page *page);
				     struct page *page, uint64_t size);
extern size_t ttm_round_pot(size_t size);
extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
#endif
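
ttm_mem_global_alloc_page() and ttm_mem_global_free_page() now take the size being accounted instead of the old no_wait/interruptible pair, so a huge-pool entry charges the global accounting once for the whole chunk rather than per 4 KiB page. A hedged sketch of a caller under the new prototypes (the function name is illustrative):

/* Sketch: charge and release the same number of bytes for one entry. */
static int account_one_entry(struct ttm_mem_global *glob, struct page *page,
			     uint64_t size)
{
	int ret;

	ret = ttm_mem_global_alloc_page(glob, page, size);	/* charge 'size' bytes */
	if (ret)
		return ret;

	/* ... the page (or huge chunk) is in use here ... */

	ttm_mem_global_free_page(glob, page, size);		/* release the same amount */
	return 0;
}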
@ -52,6 +52,7 @@ extern "C" {
#define DRM_AMDGPU_GEM_USERPTR 0x11
#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14

#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@ -67,6 +68,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)

#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@ -515,6 +517,20 @@ struct drm_amdgpu_cs_chunk_sem {
	__u32 handle;
};

#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2

union drm_amdgpu_fence_to_handle {
	struct {
		struct drm_amdgpu_fence fence;
		__u32 what;
	} in;
	struct {
		__u32 handle;
	} out;
};

struct drm_amdgpu_cs_chunk_data {
	union {
		struct drm_amdgpu_cs_chunk_ib ib_data;
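
From userspace, the new ioctl takes a drm_amdgpu_fence describing a previously submitted job plus a 'what' selector, and returns a syncobj handle, a syncobj fd, or a sync_file fd depending on the selector. A hedged userspace sketch (fd, ctx_id and seq_no are placeholders that would come from an earlier CS submission; drmIoctl() is libdrm's ioctl wrapper):

/* Sketch: convert a submitted fence into a pollable sync_file descriptor. */
union drm_amdgpu_fence_to_handle args = {0};

args.in.fence.ctx_id = ctx_id;			/* context that submitted the job */
args.in.fence.ip_type = AMDGPU_HW_IP_GFX;	/* ring type of that submission */
args.in.fence.seq_no = seq_no;			/* sequence number returned by the CS */
args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args) == 0)
	sync_file_fd = args.out.handle;		/* pollable sync_file fd */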