Merge branch 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux

more radeon fixes

* 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux:
  Revert "drm/radeon: Use write-combined CPU mappings of ring buffers with PCIe"
  drm/radeon: fix active_cu mask on SI and CIK after re-init (v3)
  drm/radeon: fix active cu count for SI and CIK
  drm/radeon: re-enable selective GPUVM flushing
  drm/radeon: Sync ME and PFP after CP semaphore waits v4
  drm/radeon: fix display handling in radeon_gpu_reset
  drm/radeon: fix pm handling in radeon_gpu_reset
  drm/radeon: Only flush HDP cache for indirect buffers from userspace
  drm/radeon: properly document reloc priority mask
commit c3735aeb65
Merged by Dave Airlie on 2014-08-22 07:29:25 +10:00
28 changed files with 154 additions and 85 deletions


@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	u32 mc_shared_chmap, mc_arb_ramcfg;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
-	int i, j, k;
+	int i, j;
 
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
@@ -3672,12 +3672,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		     rdev->config.cik.max_sh_per_se,
 		     rdev->config.cik.max_backends_per_se);
 
+	rdev->config.cik.active_cus = 0;
 	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
 		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
-			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
-				rdev->config.cik.active_cus +=
-					hweight32(cik_get_cu_active_bitmap(rdev, i, j));
-			}
+			rdev->config.cik.active_cus +=
+				hweight32(cik_get_cu_active_bitmap(rdev, i, j));
 		}
 	}
@@ -3801,7 +3800,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -3920,6 +3919,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, 0);
 }
 
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a sempahore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
@@ -3932,6 +3942,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
+	if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+		/* Prevent the PFP from running ahead of the semaphore wait */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		radeon_ring_write(ring, 0x0);
+	}
+
 	return true;
 }
@@ -4004,7 +4020,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;
@@ -4103,7 +4119,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
@@ -4324,7 +4340,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	return 0;
 }
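For context on the two cik_gpu_init() hunks above (the same pattern is applied to si_gpu_init() further down): the old inner k loop added the full popcount of the per-SE/SH bitmap once for every CU slot, so the reported CU count was multiplied by max_cu_per_sh, and, as the added initialisation suggests, nothing re-zeroed the counter when gpu_init ran again after a GPU reset. A self-contained sketch of the arithmetic, with __builtin_popcount standing in for the kernel's hweight32() (illustrative only, not driver code; the bitmap value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int bitmap = 0x3ff;	/* assume 10 active CUs in one SE/SH pair */
	unsigned int max_cu_per_sh = 10;
	unsigned int old_count = 0, new_count;

	/* old loop body: popcount added once per CU slot -> 10 * 10 = 100 */
	for (unsigned int k = 0; k < max_cu_per_sh; k++)
		old_count += __builtin_popcount(bitmap);

	/* fixed loop body: popcount added once per SE/SH pair -> 10 */
	new_count = __builtin_popcount(bitmap);

	printf("old over-count: %u, fixed: %u\n", old_count, new_count);
	return 0;
}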


@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
 	radeon_ring_write(ring, 1); /* number of DWs to follow */
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);


@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* */
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	return 0;
 }


@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;


@@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	cayman_cp_enable(rdev, true);
@@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* */
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	/* XXX init other rings */


@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	return r;
 }
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 			  RADEON_ISYNC_ANY3D_IDLE2D |
 			  RADEON_ISYNC_WAIT_IDLEGUI |
 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	}
 	radeon_ring_write(ring, PACKET0(scratch, 0));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[6] = PACKET2(0);
 	ib.ptr[7] = PACKET2(0);
 	ib.length_dw = 8;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;


@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	return r;
 }


@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_write(ring,
 			  R300_GEOMETRY_ROUND_NEAREST |
 			  R300_COLOR_ROUND_NEAREST);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r300_errata(struct radeon_device *rdev)


@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
 	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
 	radeon_ring_lock(rdev, ring, 8);
 	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
 	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }


@@ -2547,7 +2547,7 @@ int r600_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	cp_me = 0xff;
 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2683,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF)
@@ -2753,6 +2753,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	}
 }
 
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a sempahore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
@@ -2768,6 +2779,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
+	/* PFP_SYNC_ME packet only exists on 7xx+ */
+	if (emit_wait && (rdev->family >= CHIP_RV770)) {
+		/* Prevent the PFP from running ahead of the semaphore wait */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		radeon_ring_write(ring, 0x0);
+	}
+
 	return true;
 }
@@ -2845,7 +2863,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;
@@ -3165,7 +3183,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
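The two semaphore hunks above (and the matching CIK ones earlier) close a race between the CP's prefetch parser (PFP) and micro engine (ME): the PFP could fetch and begin processing packets past a semaphore wait that the ME had not yet satisfied. The fix emits a PFP_SYNC_ME packet after every CP semaphore wait, on the GFX ring for CIK and on RV770 and newer for the r600 family (older parts lack the packet, hence the family check and the "r7xx+ only" note on the new define). A rough sketch of the resulting wait sequence, using the names from the hunk above; the MEM_SEMAPHORE opcode name and the wait value of sel are taken from the driver headers and assumed here, this is not a literal excerpt:

	/* semaphore wait, as emitted by r600_semaphore_ring_emit() */
	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);	/* sel selects "wait" */

	/* new with this merge: stall the PFP until the ME has passed the wait */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);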


@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
 	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
 	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[3] = 0xDEADBEEF;
 	ib.length_dw = 4;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;


@@ -1597,6 +1597,7 @@
 	 */
 #	define PACKET3_CP_DMA_CMD_SAIC		(1 << 28)
 #	define PACKET3_CP_DMA_CMD_DAIC		(1 << 29)
+#define	PACKET3_PFP_SYNC_ME				0x42 /* r7xx+ only */
 #define	PACKET3_SURFACE_SYNC				0x43
 #	define PACKET3_CB0_DEST_BASE_ENA	(1 << 6)
 #	define PACKET3_FULL_CACHE_ENA		(1 << 20) /* r7xx+ only */


@@ -968,7 +968,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib);
+		       struct radeon_ib *const_ib, bool hdp_flush);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -978,8 +978,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+			bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+			       bool hdp_flush);
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
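The prototype changes above thread an explicit hdp_flush flag through the ring-commit and IB-schedule helpers. The point, visible in the call-site updates below, is that only command streams coming from userspace (which may reference buffers the CPU has just written) still trigger the HDP cache flush, while kernel-internal submissions such as ring tests, CP init and VM updates pass false and skip it. Two call sites copied from the hunks that follow, purely as illustration:

	/* kernel-internal ring work (ring tests, CP start, etc.): no flush */
	radeon_ring_unlock_commit(rdev, ring, false);

	/* userspace command stream in the CS ioctl path: flush the HDP cache */
	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);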


@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		 * the buffers used for read only, which doubles the range
 		 * to 0 to 31. 32 is reserved for the kernel driver.
 		 */
-		priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+			   + !!r->write_domain;
 
 		/* the first reloc of an UVD job is the msg and that must be in
 		   VRAM, also but everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	radeon_vce_note_usage(rdev);
 	radeon_cs_sync_rings(parser);
-	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
 	} else {
-		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
 	}
 
 out:
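For the reloc-priority hunk above, a small self-contained example of the documented encoding (hypothetical flag value; plain C, not driver code):

#include <stdio.h>
#include <stdint.h>

#define RADEON_RELOC_PRIO_MASK	(0xf << 0)	/* as added to radeon_drm.h below */

int main(void)
{
	uint32_t flags = 0xa;	/* userspace asks for priority 10 */

	/* read-only reloc: 10 * 2 + 0 = 20; writable reloc: 10 * 2 + 1 = 21 */
	uint32_t prio_ro = (flags & RADEON_RELOC_PRIO_MASK) * 2 + 0;
	uint32_t prio_rw = (flags & RADEON_RELOC_PRIO_MASK) * 2 + 1;

	/* userspace values therefore span 0..31, leaving 32 for the kernel driver */
	printf("read-only: %u, writable: %u\n", prio_ro, prio_rw);
	return 0;
}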


@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-	radeon_pm_suspend(rdev);
 	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ retry:
 		}
 	}
 
-	radeon_pm_resume(rdev);
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		/* do dpm late init */
+		r = radeon_pm_late_init(rdev);
+		if (r) {
+			rdev->pm.dpm_enabled = false;
+			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+		}
+	} else {
+		/* resume old pm late */
+		radeon_pm_resume(rdev);
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
+	}
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
+
 	drm_helper_resume_force_mode(rdev->ddev);
+
+	/* set the power state here in case we are a PX system or headless */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		radeon_pm_compute_clocks(rdev);
+
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	if (r) {
 		/* bad news, how to tell it to userspace ? */


@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * @rdev: radeon_device pointer
  * @ib: IB object to schedule
  * @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * to SI there was just a DE IB.
  */
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib)
+		       struct radeon_ib *const_ib, bool hdp_flush)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
 	int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (ib->vm)
 		radeon_vm_fence(rdev, ib->vm, ib->fence);
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
 	return 0;
 }


@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Update the wptr (write pointer) to tell the GPU to
  * execute new commands on the ring buffer (all asics).
  */
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			bool hdp_flush)
 {
 	/* If we are emitting the HDP flush via the ring buffer, we need to
 	 * do it before padding.
 	 */
-	if (rdev->asic->ring[ring->idx]->hdp_flush)
+	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
 		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
 	/* We pad to match fetch size */
 	while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 	/* If we are emitting the HDP flush via MMIO, we need to do it after
 	 * all CPU writes to VRAM finished.
 	 */
-	if (rdev->asic->mmio_hdp_flush)
+	if (hdp_flush && rdev->asic->mmio_hdp_flush)
 		rdev->asic->mmio_hdp_flush(rdev);
 	radeon_ring_set_wptr(rdev, ring);
 }
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			       bool hdp_flush)
 {
-	radeon_ring_commit(rdev, ring);
+	radeon_ring_commit(rdev, ring, hdp_flush);
 	mutex_unlock(&rdev->ring_lock);
 }
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
 		radeon_ring_write(ring, data[i]);
 	}
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	kfree(data);
 	return 0;
 }
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
 		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT,
-				     (rdev->flags & RADEON_IS_PCIE) ?
-				     RADEON_GEM_GTT_WC : 0,
+				     RADEON_GEM_DOMAIN_GTT, 0,
 				     NULL, &ring->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring create failed\n", r);


@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			continue;
 		}
 
-		radeon_ring_commit(rdev, &rdev->ring[i]);
+		radeon_ring_commit(rdev, &rdev->ring[i], false);
 		radeon_fence_note_sync(fence, ring);
 		semaphore->gpu_addr += 8;


@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
 			return r;
 		}
 		radeon_fence_emit(rdev, fence, ring->idx);
-		radeon_ring_unlock_commit(rdev, ring);
+		radeon_ring_unlock_commit(rdev, ring, false);
 	}
 	return 0;
 }
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
 	if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
 	if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 
 	r = radeon_fence_wait(fence1, false);
 	if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 
 	r = radeon_fence_wait(fence2, false);
 	if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringA);
+	radeon_ring_unlock_commit(rdev, ringA, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
 	if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringB);
+	radeon_ring_unlock_commit(rdev, ringB, false);
 
 	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
 	if (r)
 		goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringC);
+	radeon_ring_unlock_commit(rdev, ringC, false);
 
 	for (i = 0; i < 30; ++i) {
 		mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
 		goto out_cleanup;
 	}
 	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-	radeon_ring_unlock_commit(rdev, ringC);
+	radeon_ring_unlock_commit(rdev, ringC, false);
 
 	mdelay(1000);


@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 		ib.ptr[i] = PACKET2(0);
 	ib.length_dw = 16;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r)
 		goto err;
 	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);


@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	radeon_ring_write(ring, VCE_CMD_END);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		if (vce_v1_0_get_rptr(rdev, ring) != rptr)


@@ -238,9 +238,7 @@ void radeon_vm_flush(struct radeon_device *rdev,
 	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
 
 	/* if we can't remember our last VM flush then flush now! */
-	/* XXX figure out why we have to flush all the time before CIK */
-	if (rdev->family < CHIP_BONAIRE ||
-	    !vm->last_flush || pd_addr != vm->pd_gpu_addr) {
+	if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
 		trace_radeon_vm_flush(pd_addr, ring, vm->id);
 		vm->pd_gpu_addr = pd_addr;
 		radeon_ring_vm_flush(rdev, ring, vm);
@@ -422,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > 64);
 
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r)
 		goto error;
@@ -699,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		WARN_ON(ib.length_dw > ndw);
-		r = radeon_ib_schedule(rdev, &ib, NULL);
+		r = radeon_ib_schedule(rdev, &ib, NULL, false);
 		if (r) {
 			radeon_ib_free(rdev, &ib);
 			return r;
@@ -963,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	WARN_ON(ib.length_dw > ndw);
 
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		return r;


@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
 	radeon_ring_write(ring, PACKET0(0x20C8, 0));
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)


@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;


@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
 	u32 sx_debug_1;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
-	int i, j, k;
+	int i, j;
 
 	switch (rdev->family) {
 	case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
 		     rdev->config.si.max_sh_per_se,
 		     rdev->config.si.max_cu_per_sh);
 
+	rdev->config.si.active_cus = 0;
 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
-			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
-				rdev->config.si.active_cus +=
-					hweight32(si_get_cu_active_bitmap(rdev, i, j));
-			}
+			rdev->config.si.active_cus +=
+				hweight32(si_get_cu_active_bitmap(rdev, i, j));
 		}
 	}
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
 	radeon_ring_write(ring, 0xc000);
 	radeon_ring_write(ring, 0xe000);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	si_cp_enable(rdev, true);
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
 		ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
 		radeon_ring_write(ring, 0);
-		radeon_ring_unlock_commit(rdev, ring);
+		radeon_ring_unlock_commit(rdev, ring, false);
 	}
 
 	return 0;


@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	radeon_semaphore_free(rdev, &sem, *fence);
 	return r;


@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
 	radeon_ring_write(ring, 3);
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 done:
 	/* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	}
 	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(UVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)


@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
 };
 
 /* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_PRIO_MASK		(0xf << 0)
 struct drm_radeon_cs_reloc {
 	uint32_t handle;
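As a hypothetical userspace illustration of the new mask (the read_domains, write_domain and flags members belong to the same UAPI struct whose first field is shown above; the buffer handle value is made up):

	/* request the highest userspace priority (15) for a read-only BO */
	struct drm_radeon_cs_reloc reloc = {
		.handle       = bo_handle,	/* hypothetical GEM handle */
		.read_domains = RADEON_GEM_DOMAIN_VRAM,
		.write_domain = 0,
		.flags        = 0xf & RADEON_RELOC_PRIO_MASK,
	};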