drm/amd/display: Perform outstanding programming on full updates

[WHY]
In certain scenarios, DC can internally trigger back-to-back full updates
that miss some required programming which is normally deferred
until post-update via optimize_bandwidth.

[HOW]
In back-to-back update scenarios, wait for pending updates to complete
and perform any strictly required outstanding programming.
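
The sketch below is an illustrative, self-contained model of the resulting flow,
not the driver code itself: it uses stub types and stand-in functions to show
that, on each full update, pending double-buffered hardware updates are flushed
first and only then is the deferred programming (e.g. compbuf sizing) applied,
mirroring hwss_process_outstanding_hw_updates() and the per-ASIC
program_outstanding_updates hooks added in the hunks below.

/*
 * Illustrative sketch only (stub types, not DC structures): models the call
 * order this change introduces for back-to-back full updates.
 */
#include <stdio.h>

struct dc_stub {
        /* optional per-ASIC hook, mirroring hwss->program_outstanding_updates */
        void (*program_outstanding_updates)(struct dc_stub *dc);
};

/* stand-in for hwss_wait_for_outstanding_hw_updates(): wait for pending
 * DRR/ODM double-buffer updates and MPCC disconnects to clear */
static void wait_for_outstanding_hw_updates_stub(struct dc_stub *dc)
{
        (void)dc;
        printf("wait: pending double-buffered updates complete\n");
}

/* stand-in for hwss_process_outstanding_hw_updates(): wait, then perform
 * any strictly required deferred programming */
static void process_outstanding_hw_updates_stub(struct dc_stub *dc)
{
        wait_for_outstanding_hw_updates_stub(dc);
        if (dc->program_outstanding_updates)
                dc->program_outstanding_updates(dc);
}

static void program_outstanding_updates_stub(struct dc_stub *dc)
{
        (void)dc;
        printf("program: deferred state (e.g. compbuf size)\n");
}

int main(void)
{
        struct dc_stub dc = {
                .program_outstanding_updates = program_outstanding_updates_stub,
        };

        /* back-to-back full updates: each one flushes and programs
         * outstanding state instead of waiting for optimize_bandwidth */
        process_outstanding_hw_updates_stub(&dc);
        process_outstanding_hw_updates_stub(&dc);
        return 0;
}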

Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
Signed-off-by: Dillon Varone <dillon.varone@amd.com>
Signed-off-by: Tom Chung <chiahsuan.chung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Dillon Varone 2024-08-01 15:38:34 -04:00 committed by Alex Deucher
parent 67ea53a4bd
commit 7a1eb66809
11 changed files with 176 additions and 122 deletions


@@ -1352,80 +1352,6 @@ static void disable_vbios_mode_if_required(
}
}
/**
* wait_for_blank_complete - wait for all active OPPs to finish pending blank
* pattern updates
*
* @dc: [in] dc reference
* @context: [in] hardware context in use
*/
static void wait_for_blank_complete(struct dc *dc,
struct dc_state *context)
{
struct pipe_ctx *opp_head;
struct dce_hwseq *hws = dc->hwseq;
int i;
if (!hws->funcs.wait_for_blank_complete)
return;
for (i = 0; i < MAX_PIPES; i++) {
opp_head = &context->res_ctx.pipe_ctx[i];
if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
continue;
hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
}
}
static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
{
struct pipe_ctx *otg_master;
struct timing_generator *tg;
int i;
for (i = 0; i < MAX_PIPES; i++) {
otg_master = &context->res_ctx.pipe_ctx[i];
if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
continue;
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
}
/* ODM update may require reprogramming the blank pattern for each OPP */
wait_for_blank_complete(dc, context);
}
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
PERF_TRACE();
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
while (count < 100000) {
/* Must set to false to start with, due to OR in update function */
pipe->plane_state->status.is_flip_pending = false;
dc->hwss.update_pending_status(pipe);
if (!pipe->plane_state->status.is_flip_pending)
break;
udelay(1);
count++;
}
ASSERT(!pipe->plane_state->status.is_flip_pending);
}
PERF_TRACE();
}
/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
@@ -2109,12 +2035,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
- wait_for_no_pipes_pending(dc, context);
+ hwss_wait_for_no_pipes_pending(dc, context);
/*
* optimized dispclk depends on ODM setup. Need to wait for ODM
* update pending complete before optimizing bandwidth.
*/
- wait_for_odm_update_pending_complete(dc, context);
+ hwss_wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -3786,47 +3712,6 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
* operations to complete. It should be invoked as a preamble to full
* update programming, before asserting any HW locks.
*/
int pipe_idx;
int opp_inst;
int opp_count = dc->res_pool->res_cap->num_opp;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
continue;
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if ((dc->res_pool->opps[opp_inst] != NULL) &&
(dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
}
wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3850,7 +3735,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
- wait_for_outstanding_hw_updates(dc, context);
+ hwss_process_outstanding_hw_updates(dc, dc->current_state);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];


@@ -978,3 +978,126 @@ void get_surface_tile_visual_confirm_color(
break;
}
}
/**
* hwss_wait_for_all_blank_complete - wait for all active OPPs to finish pending blank
* pattern updates
*
* @dc: [in] dc reference
* @context: [in] hardware context in use
*/
void hwss_wait_for_all_blank_complete(struct dc *dc,
struct dc_state *context)
{
struct pipe_ctx *opp_head;
struct dce_hwseq *hws = dc->hwseq;
int i;
if (!hws->funcs.wait_for_blank_complete)
return;
for (i = 0; i < MAX_PIPES; i++) {
opp_head = &context->res_ctx.pipe_ctx[i];
if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
continue;
hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
}
}
void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
{
struct pipe_ctx *otg_master;
struct timing_generator *tg;
int i;
for (i = 0; i < MAX_PIPES; i++) {
otg_master = &context->res_ctx.pipe_ctx[i];
if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
continue;
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
}
/* ODM update may require reprogramming the blank pattern for each OPP */
hwss_wait_for_all_blank_complete(dc, context);
}
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
while (count < 100000) {
/* Must set to false to start with, due to OR in update function */
pipe->plane_state->status.is_flip_pending = false;
dc->hwss.update_pending_status(pipe);
if (!pipe->plane_state->status.is_flip_pending)
break;
udelay(1);
count++;
}
ASSERT(!pipe->plane_state->status.is_flip_pending);
}
}
void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
* operations to complete. It should be invoked as a preamble to full
* update programming, before asserting any HW locks.
*/
int pipe_idx;
int opp_inst;
int opp_count = dc->res_pool->res_cap->num_opp;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
continue;
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if ((dc->res_pool->opps[opp_inst] != NULL) &&
(dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
}
hwss_wait_for_odm_update_pending_complete(dc, dc_context);
}
void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/* wait for outstanding updates */
hwss_wait_for_outstanding_hw_updates(dc, dc_context);
/* perform outstanding post update programming */
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}


@@ -1846,3 +1846,13 @@ void dcn32_interdependent_update_lock(struct dc *dc,
dc->hwss.pipe_control_lock(dc, pipe, false);
}
}
void dcn32_program_outstanding_updates(struct dc *dc,
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
/* update compbuf if required */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}


@@ -133,4 +133,8 @@ void dcn32_prepare_bandwidth(struct dc *dc,
void dcn32_interdependent_update_lock(struct dc *dc,
struct dc_state *context, bool lock);
void dcn32_program_outstanding_updates(struct dc *dc,
struct dc_state *context);
#endif /* __DC_HWSS_DCN32_H__ */


@@ -120,6 +120,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.blank_phantom = dcn32_blank_phantom,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {


@@ -123,6 +123,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {


@@ -122,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {


@@ -1399,7 +1399,7 @@ void dcn401_prepare_bandwidth(struct dc *dc,
{
struct hubbub *hubbub = dc->res_pool->hubbub;
bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- unsigned int compbuf_size_kb = 0;
+ unsigned int compbuf_size = 0;
/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
if (p_state_change_support) {
@@ -1429,10 +1429,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
- compbuf_size_kb = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
- hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size_kb, false);
+ hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
@@ -1760,3 +1760,13 @@ void dcn401_interdependent_update_lock(struct dc *dc,
}
}
}
void dcn401_program_outstanding_updates(struct dc *dc,
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
/* update compbuf if required */
if (hubbub->funcs->program_compbuf_segments)
hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
}


@@ -83,4 +83,5 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN401_H__ */


@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {


@@ -459,6 +459,8 @@ struct hw_sequencer_funcs {
bool enable);
void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
};
void color_space_to_black_color(
@@ -519,6 +521,21 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_stream_status *stream_status,
struct dc_state *context);
void hwss_wait_for_all_blank_complete(struct dc *dc,
struct dc_state *context);
void hwss_wait_for_odm_update_pending_complete(struct dc *dc,
struct dc_state *context);
void hwss_wait_for_no_pipes_pending(struct dc *dc,
struct dc_state *context);
void hwss_wait_for_outstanding_hw_updates(struct dc *dc,
struct dc_state *dc_context);
void hwss_process_outstanding_hw_updates(struct dc *dc,
struct dc_state *dc_context);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
void hwss_program_manual_trigger(union block_sequence_params *params);