drm/amdgpu: Modify the argument of emit_ib interface

Use a pointer to struct amdgpu_job as the function argument
instead of vmid, so that the other members of struct amdgpu_job
can be accessed in the emit_ib functions.

v2: add a wrapper for getting the VMID
    put the job before the ib in the parameter list.
v3: refine the wrapper name

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
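
At a glance, the new contract looks like the sketch below: each ring's emit_ib callback now receives the amdgpu_job pointer and derives the VMID itself through the new wrapper, which tolerates a NULL job (IBs submitted without a job, such as ring tests, fall back to VMID 0). The function name example_ring_emit_ib is hypothetical; the signature and macro come from the hunks that follow.

/* Minimal sketch, not taken from the patch itself: "example_ring_emit_ib"
 * is a hypothetical backend; the signature and AMDGPU_JOB_GET_VMID()
 * match the hunks below. */
static void example_ring_emit_ib(struct amdgpu_ring *ring,
                                 struct amdgpu_job *job,
                                 struct amdgpu_ib *ib,
                                 bool ctx_switch)
{
        /* NULL-safe: submissions without a job get VMID 0 */
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* ... emit the IB packets using vmid and ib->gpu_addr ... */
}

Passing the whole job rather than a single field means future per-job state can reach the ring backends without changing this interface again.
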
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                     !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                         continue;
 
-                amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-                                    need_ctx_switch);
+                amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
                 need_ctx_switch = false;
         }
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)                \
                 container_of((sched_job), struct amdgpu_job, base)
 
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
 struct amdgpu_fence;
 
 struct amdgpu_job {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
         unsigned emit_ib_size;
         /* command emit functions */
         void (*emit_ib)(struct amdgpu_ring *ring,
+                        struct amdgpu_job *job,
                         struct amdgpu_ib *ib,
-                        unsigned vmid, bool ctx_switch);
+                        bool ctx_switch);
         void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                            uint64_t seq, unsigned flags);
         void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -228,7 +229,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ out:
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                             unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+                             struct amdgpu_job *job,
+                             struct amdgpu_ib *ib,
+                             bool ctx_switch)
 {
         amdgpu_ring_write(ring, VCE_CMD_IB);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                             unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                             struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                 unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 extra_bits = vmid & 0xf;
 
         /* IB packet must end on a 8 DW boundary */
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1840,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 header, control = 0;
 
         /* insert SWITCH_BUFFER packet before first IB in the ring frame */
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2227,9 +2227,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+                                      struct amdgpu_job *job,
                                       struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 header, control = 0;
 
         /* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2256,9 +2258,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_job *job,
                                           struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+                                          bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6109,9 +6109,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+                                      struct amdgpu_job *job,
                                       struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 header, control = 0;
 
         if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6139,9 +6141,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_job *job,
                                           struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+                                          bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4049,9 +4049,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
+                                      struct amdgpu_job *job,
                                       struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 header, control = 0;
 
         if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4080,9 +4082,11 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_job *job,
                                           struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+                                          bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+                                   struct amdgpu_job *job,
                                    struct amdgpu_ib *ib,
-                                   unsigned vmid, bool ctx_switch)
+                                   bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         /* IB packet must end on a 8 DW boundary */
         sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                   struct amdgpu_job *job,
                                    struct amdgpu_ib *ib,
-                                   unsigned vmid, bool ctx_switch)
+                                   bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         /* IB packet must end on a 8 DW boundary */
         sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -497,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VEGA10).
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                   struct amdgpu_job *job,
                                    struct amdgpu_ib *ib,
-                                   unsigned vmid, bool ctx_switch)
+                                   bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         /* IB packet must end on a 8 DW boundary */
         sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 }
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+                                struct amdgpu_job *job,
                                 struct amdgpu_ib *ib,
-                                unsigned vmid, bool ctx_switch)
+                                bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
          * Pad as necessary with NOPs.
          */
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -509,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
         amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
         amdgpu_ring_write(ring, ib->gpu_addr);
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -524,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
         amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -975,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
         amdgpu_ring_write(ring, vmid);
 
@@ -998,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-                struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                      struct amdgpu_job *job,
+                                      struct amdgpu_ib *ib,
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
         amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1270,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
         struct amdgpu_device *adev = ring->adev;
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1299,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-                struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                      struct amdgpu_job *job,
+                                      struct amdgpu_ib *ib,
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
         amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -833,8 +833,12 @@ out:
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-                struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                  struct amdgpu_job *job,
+                                  struct amdgpu_ib *ib,
+                                  bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, VCE_CMD_IB_VM);
         amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -946,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
 }
 #endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-                struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                                  struct amdgpu_ib *ib, bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, VCE_CMD_IB_VM);
         amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1358,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  * Write ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+                                      struct amdgpu_job *job,
                                       struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                      bool ctx_switch)
 {
         struct amdgpu_device *adev = ring->adev;
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1516,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-                struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                      struct amdgpu_job *job,
+                                      struct amdgpu_ib *ib,
+                                      bool ctx_switch)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
         amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
         amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1717,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
  * Write ring commands to execute the indirect buffer.
  */
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+                                       struct amdgpu_job *job,
                                        struct amdgpu_ib *ib,
-                                       unsigned vmid, bool ctx_switch)
+                                       bool ctx_switch)
 {
         struct amdgpu_device *adev = ring->adev;
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
         amdgpu_ring_write(ring,
                 PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));