drm/amd/display: roll back quality EASF and ISHARP and dc dependency changes

[Why]
Several regressions were observed that are related to the quality EASF and
ISHARP changes and to the dc dependency changes.

[How]
Roll back SPL changes

Signed-off-by: Samson Tam <Samson.Tam@amd.com>
Reviewed-by: Martin Leung <martin.leung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Samson Tam 2024-07-14 16:31:05 -04:00 committed by Alex Deucher
parent 7c5b344537
commit f9e6759888
21 changed files with 1025 additions and 4550 deletions

View File

@ -1511,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active;
spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active;
// Convert pipe_ctx to respective input params for SPL
translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);

View File

@ -42,26 +42,26 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
{
scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c + 1;
scaling_quality->h_taps = spl_scaling_quality->h_taps + 1;
scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c + 1;
scaling_quality->v_taps = spl_scaling_quality->v_taps + 1;
scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c;
scaling_quality->h_taps = spl_scaling_quality->h_taps;
scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c;
scaling_quality->v_taps = spl_scaling_quality->v_taps;
}
static void populate_ratios_from_splratios(struct scaling_ratios *ratios,
const struct ratio *spl_ratios)
const struct spl_ratios *spl_ratios)
{
ratios->horz = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio >> 5, 3, 19);
ratios->vert = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio >> 5, 3, 19);
ratios->horz_c = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio_c >> 5, 3, 19);
ratios->vert_c = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio_c >> 5, 3, 19);
ratios->horz = spl_ratios->horz;
ratios->vert = spl_ratios->vert;
ratios->horz_c = spl_ratios->horz_c;
ratios->vert_c = spl_ratios->vert_c;
}
static void populate_inits_from_splinits(struct scl_inits *inits,
const struct init *spl_inits)
const struct spl_inits *spl_inits)
{
inits->h = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int, spl_inits->h_filter_init_frac >> 5, 0, 19);
inits->v = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int, spl_inits->v_filter_init_frac >> 5, 0, 19);
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
inits->h = spl_inits->h;
inits->v = spl_inits->v;
inits->h_c = spl_inits->h_c;
inits->v_c = spl_inits->v_c;
}
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
@ -170,15 +170,6 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
/* Translate transfer function */
spl_in->basic_in.tf_type = (enum spl_transfer_func_type) plane_state->in_transfer_func.type;
spl_in->basic_in.tf_predefined_type = (enum spl_transfer_func_predefined) plane_state->in_transfer_func.tf;
spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
/* Check if it is stream is in fullscreen and if its HDR.
* Use this to determine sharpness levels
*/
spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
}
/// @brief Translate SPL output parameters to pipe context
@ -187,15 +178,15 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out)
{
// Make scaler data recout point to spl output field recout
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->dscl_prog_data->recout);
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout);
// Make scaler data ratios point to spl output field ratios
populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->dscl_prog_data->ratios);
populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios);
// Make scaler data viewport point to spl output field viewport
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->dscl_prog_data->viewport);
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport);
// Make scaler data viewport_c point to spl output field viewport_c
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->dscl_prog_data->viewport_c);
populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c);
// Make scaler data taps point to spl output field scaling taps
populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->dscl_prog_data->taps);
populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps);
// Make scaler data init point to spl output field init
populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->dscl_prog_data->init);
populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits);
}

View File

@ -6,7 +6,6 @@
#define __DC_SPL_TRANSLATE_H__
#include "dc.h"
#include "resource.h"
#include "dm_helpers.h"
/* Map SPL input parameters to pipe context
* @pipe_ctx: pipe context

View File

@ -788,14 +788,6 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
* certain cases. Hence do corrective active and disable scaling.
*/
plane->composition.scaler_info.enabled = false;
} else if ((plane_state->ctx->dc->config.use_spl == true) &&
(plane->composition.scaler_info.enabled == false)) {
/* To enable sharpener for 1:1, scaler must be enabled. If use_spl is set, then
* allow case where ratio is 1 but taps > 1
*/
if ((scaler_data->taps.h_taps > 1) || (scaler_data->taps.v_taps > 1) ||
(scaler_data->taps.h_taps_c > 1) || (scaler_data->taps.v_taps_c > 1))
plane->composition.scaler_info.enabled = true;
}
/* always_scale is only used for debug purposes not used in production but has to be

View File

@ -280,8 +280,7 @@ static void dpp401_dscl_set_scaler_filter(
static void dpp401_dscl_set_scl_filter(
struct dcn401_dpp *dpp,
const struct scaler_data *scl_data,
bool chroma_coef_mode,
bool force_coeffs_update)
bool chroma_coef_mode)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
@ -344,7 +343,7 @@ static void dpp401_dscl_set_scl_filter(
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
if ((filter_updated) || (force_coeffs_update)) {
if (filter_updated) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
@ -656,226 +655,6 @@ static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height);
}
/**
 * dpp401_dscl_program_easf_v - Program EASF_V
 *
 * @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
 *
 * This is the primary function to program vertical EASF registers
 *
 */
static void dpp401_dscl_program_easf_v(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
/* DSCL_EASF_V_MODE */
REG_SET_3(DSCL_EASF_V_MODE, 0,
SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en,
SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor,
SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
/* The remaining EASF_V registers only matter when vertical EASF is
 * enabled, so skip programming them otherwise.
 */
if (!scl_data->dscl_prog_data.easf_v_en) {
PERF_TRACE();
return;
}
/* DSCL_EASF_V_BF_CNTL */
REG_SET_6(DSCL_EASF_V_BF_CNTL, 0,
SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en,
SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode,
SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode,
SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain,
SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain,
SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
/* DSCL_EASF_V_RINGEST_3TAP_CNTLn */
REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL1, 0,
SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt,
SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL2, 0,
SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 0,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
/* DSCL_EASF_V_RINGEST_EVENTAP_REDUCE */
REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, 0,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
/* DSCL_EASF_V_RINGEST_EVENTAP_GAIN */
REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, 0,
SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1,
SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
/* DSCL_EASF_V_BF_FINAL_MAX_MIN */
REG_SET_4(DSCL_EASF_V_BF_FINAL_MAX_MIN, 0,
SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa,
SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb,
SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina,
SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
/* DSCL_EASF_V_BF1_PWL_SEGn: 8-segment PWL, each SEG has in/base/slope
 * except the last (SEG7), which has no slope field.
 */
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG0, 0,
SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0,
SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0,
SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG1, 0,
SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1,
SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1,
SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG2, 0,
SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2,
SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2,
SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG3, 0,
SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3,
SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3,
SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG4, 0,
SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4,
SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4,
SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG5, 0,
SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5,
SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5,
SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5);
REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG6, 0,
SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6,
SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6,
SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6);
REG_SET_2(DSCL_EASF_V_BF1_PWL_SEG7, 0,
SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7,
SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7);
/* DSCL_EASF_V_BF3_PWL_SEGn: 6-segment PWL; last segment (SEG5) has no
 * slope field. Note the struct members use "set" where the register
 * fields use "SEG".
 */
REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG0, 0,
SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0,
SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0,
SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0);
REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG1, 0,
SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1,
SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1,
SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1);
REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG2, 0,
SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2,
SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2,
SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2);
REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG3, 0,
SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3,
SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3,
SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3);
REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG4, 0,
SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4,
SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4,
SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4);
REG_SET_2(DSCL_EASF_V_BF3_PWL_SEG5, 0,
SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5,
SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5);
PERF_TRACE();
}
/**
 * dpp401_dscl_program_easf_h - Program EASF_H
 *
 * @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
 *
 * This is the primary function to program horizontal EASF registers
 *
 */
static void dpp401_dscl_program_easf_h(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
/* DSCL_EASF_H_MODE */
REG_SET_3(DSCL_EASF_H_MODE, 0,
SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en,
SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor,
SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
/* The remaining EASF_H registers only matter when horizontal EASF is
 * enabled, so skip programming them otherwise.
 */
if (!scl_data->dscl_prog_data.easf_h_en) {
PERF_TRACE();
return;
}
/* DSCL_EASF_H_BF_CNTL */
REG_SET_6(DSCL_EASF_H_BF_CNTL, 0,
SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en,
SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode,
SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode,
SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain,
SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain,
SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
/* DSCL_EASF_H_RINGEST_EVENTAP_REDUCE */
REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, 0,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
/* DSCL_EASF_H_RINGEST_EVENTAP_GAIN */
REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, 0,
SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1,
SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
/* DSCL_EASF_H_BF_FINAL_MAX_MIN */
REG_SET_4(DSCL_EASF_H_BF_FINAL_MAX_MIN, 0,
SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa,
SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb,
SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina,
SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
/* DSCL_EASF_H_BF1_PWL_SEGn: 8-segment PWL, each SEG has in/base/slope
 * except the last (SEG7), which has no slope field.
 */
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG0, 0,
SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0,
SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0,
SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG1, 0,
SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1,
SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1,
SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG2, 0,
SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2,
SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2,
SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG3, 0,
SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3,
SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3,
SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG4, 0,
SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4,
SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4,
SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG5, 0,
SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5,
SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5,
SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5);
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG6, 0,
SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6,
SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6,
SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6);
REG_SET_2(DSCL_EASF_H_BF1_PWL_SEG7, 0,
SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7,
SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7);
/* DSCL_EASF_H_BF3_PWL_SEGn: 6-segment PWL; last segment (SEG5) has no
 * slope field. Note the struct members use "set" where the register
 * fields use "SEG".
 */
REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG0, 0,
SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0,
SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0,
SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0);
REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG1, 0,
SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1,
SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1,
SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1);
REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG2, 0,
SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2,
SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2,
SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2);
REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG3, 0,
SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3,
SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3,
SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3);
REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG4, 0,
SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4,
SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4,
SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4);
REG_SET_2(DSCL_EASF_H_BF3_PWL_SEG5, 0,
SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5,
SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5);
PERF_TRACE();
}
/**
* dpp401_dscl_program_easf - Program EASF
*
@ -890,19 +669,261 @@ static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_d
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
/* DSCL_SC_MODE */
REG_SET_2(DSCL_SC_MODE, 0,
SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode,
REG_UPDATE(DSCL_SC_MODE,
SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode);
REG_UPDATE(DSCL_SC_MODE,
SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_V_MODE */
REG_UPDATE(DSCL_EASF_V_MODE,
SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en);
REG_UPDATE(DSCL_EASF_V_MODE,
SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor);
REG_UPDATE(DSCL_EASF_V_MODE,
SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain);
REG_UPDATE(DSCL_EASF_V_BF_CNTL,
SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope);
REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1);
REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1);
REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa);
REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb);
REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina);
REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7);
REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5);
REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5);
/* DSCL_EASF_H_MODE */
REG_UPDATE(DSCL_EASF_H_MODE,
SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en);
REG_UPDATE(DSCL_EASF_H_MODE,
SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor);
REG_UPDATE(DSCL_EASF_H_MODE,
SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain);
REG_UPDATE(DSCL_EASF_H_BF_CNTL,
SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1);
REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1);
REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa);
REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb);
REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina);
REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7);
REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5);
REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5);
/* DSCL_EASF_SC_MATRIX_C0C1, DSCL_EASF_SC_MATRIX_C2C3 */
REG_SET_2(DSCL_SC_MATRIX_C0C1, 0,
SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0,
REG_UPDATE(DSCL_SC_MATRIX_C0C1,
SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0);
REG_UPDATE(DSCL_SC_MATRIX_C0C1,
SCL_SC_MATRIX_C1, scl_data->dscl_prog_data.easf_matrix_c1);
REG_SET_2(DSCL_SC_MATRIX_C2C3, 0,
SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2,
REG_UPDATE(DSCL_SC_MATRIX_C2C3,
SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2);
REG_UPDATE(DSCL_SC_MATRIX_C2C3,
SCL_SC_MATRIX_C3, scl_data->dscl_prog_data.easf_matrix_c3);
dpp401_dscl_program_easf_v(dpp_base, scl_data);
dpp401_dscl_program_easf_h(dpp_base, scl_data);
PERF_TRACE();
}
/**
@ -937,11 +958,10 @@ static void dpp401_dscl_set_isharp_filter(
REG_UPDATE(ISHARP_DELTA_CTRL,
ISHARP_DELTA_LUT_HOST_SELECT, 0);
/* LUT data write is auto-indexed. Write index once */
REG_SET(ISHARP_DELTA_INDEX, 0,
ISHARP_DELTA_INDEX, 0);
for (level = 0; level < NUM_LEVELS; level++) {
filter_data = filter[level];
REG_SET(ISHARP_DELTA_INDEX, 0,
ISHARP_DELTA_INDEX, level);
REG_SET(ISHARP_DELTA_DATA, 0,
ISHARP_DELTA_DATA, filter_data);
}
@ -957,74 +977,107 @@ static void dpp401_dscl_set_isharp_filter(
*
*/
static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
const struct scaler_data *scl_data,
bool *bs_coeffs_updated)
const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
*bs_coeffs_updated = false;
PERF_TRACE();
/* ISHARP_MODE */
REG_SET_6(ISHARP_MODE, 0,
ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable,
ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode,
ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode,
ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode,
ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
/* Skip remaining register programming if ISHARP is disabled */
if (!scl_data->dscl_prog_data.isharp_en) {
PERF_TRACE();
return;
}
/* ISHARP_NOISEDET_THRESHOLD */
REG_SET_2(ISHARP_NOISEDET_THRESHOLD, 0,
ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold,
/* ISHARP_EN */
REG_UPDATE(ISHARP_MODE,
ISHARP_EN, scl_data->dscl_prog_data.isharp_en);
/* ISHARP_NOISEDET_EN */
REG_UPDATE(ISHARP_MODE,
ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable);
/* ISHARP_NOISEDET_MODE */
REG_UPDATE(ISHARP_MODE,
ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
/* ISHARP_NOISEDET_UTHRE */
REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
/* ISHARP_NOISEDET_DTHRE */
REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
/* ISHARP_NOISE_GAIN_PWL */
REG_SET_3(ISHARP_NOISE_GAIN_PWL, 0,
ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in,
ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in,
REG_UPDATE(ISHARP_MODE,
ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
/* ISHARP_NOISEDET_UTHRE */
REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
/* ISHARP_NOISEDET_DTHRE */
REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
/* ISHARP_NOISEDET_PWL_START_IN */
REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in);
/* ISHARP_NOISEDET_PWL_END_IN */
REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in);
/* ISHARP_NOISEDET_PWL_SLOPE */
REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope);
/* ISHARP_LBA_MODE */
REG_UPDATE(ISHARP_MODE,
ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode);
/* ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG */
REG_SET_3(ISHARP_LBA_PWL_SEG0, 0,
ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0],
ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0],
REG_UPDATE(ISHARP_LBA_PWL_SEG0,
ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0]);
REG_UPDATE(ISHARP_LBA_PWL_SEG0,
ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0]);
REG_UPDATE(ISHARP_LBA_PWL_SEG0,
ISHARP_LBA_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.isharp_lba.slope_seg[0]);
REG_SET_3(ISHARP_LBA_PWL_SEG1, 0,
ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1],
ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1],
REG_UPDATE(ISHARP_LBA_PWL_SEG1,
ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1]);
REG_UPDATE(ISHARP_LBA_PWL_SEG1,
ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1]);
REG_UPDATE(ISHARP_LBA_PWL_SEG1,
ISHARP_LBA_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.isharp_lba.slope_seg[1]);
REG_SET_3(ISHARP_LBA_PWL_SEG2, 0,
ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2],
ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2],
REG_UPDATE(ISHARP_LBA_PWL_SEG2,
ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2]);
REG_UPDATE(ISHARP_LBA_PWL_SEG2,
ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2]);
REG_UPDATE(ISHARP_LBA_PWL_SEG2,
ISHARP_LBA_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.isharp_lba.slope_seg[2]);
REG_SET_3(ISHARP_LBA_PWL_SEG3, 0,
ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3],
ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3],
REG_UPDATE(ISHARP_LBA_PWL_SEG3,
ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3]);
REG_UPDATE(ISHARP_LBA_PWL_SEG3,
ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3]);
REG_UPDATE(ISHARP_LBA_PWL_SEG3,
ISHARP_LBA_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.isharp_lba.slope_seg[3]);
REG_SET_3(ISHARP_LBA_PWL_SEG4, 0,
ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4],
ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4],
REG_UPDATE(ISHARP_LBA_PWL_SEG4,
ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4]);
REG_UPDATE(ISHARP_LBA_PWL_SEG4,
ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4]);
REG_UPDATE(ISHARP_LBA_PWL_SEG4,
ISHARP_LBA_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.isharp_lba.slope_seg[4]);
REG_SET_2(ISHARP_LBA_PWL_SEG5, 0,
ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5],
REG_UPDATE(ISHARP_LBA_PWL_SEG5,
ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5]);
REG_UPDATE(ISHARP_LBA_PWL_SEG5,
ISHARP_LBA_PWL_BASE_SEG5, scl_data->dscl_prog_data.isharp_lba.base_seg[5]);
/* ISHARP_FMT_MODE */
REG_UPDATE(ISHARP_MODE,
ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode);
/* ISHARP_FMT_NORM */
REG_UPDATE(ISHARP_MODE,
ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
/* ISHARP_DELTA_LUT */
dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
/* ISHARP_NLDELTA_SOFT_CLIP */
REG_SET_6(ISHARP_NLDELTA_SOFT_CLIP, 0,
ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p,
ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p,
ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p,
ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n,
ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n,
/* ISHARP_NLDELTA_SCLIP_EN_P */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p);
/* ISHARP_NLDELTA_SCLIP_PIVOT_P */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p);
/* ISHARP_NLDELTA_SCLIP_SLOPE_P */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p);
/* ISHARP_NLDELTA_SCLIP_EN_N */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n);
/* ISHARP_NLDELTA_SCLIP_PIVOT_N */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n);
/* ISHARP_NLDELTA_SCLIP_SLOPE_N */
REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n);
/* Blur and Scale Coefficients - SCL_COEF_RAM_TAP_SELECT */
@ -1034,14 +1087,12 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
dpp, scl_data->taps.v_taps,
SCL_COEF_VERTICAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_v);
*bs_coeffs_updated = true;
}
if (scl_data->dscl_prog_data.filter_blur_scale_h) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_HORIZONTAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_h);
*bs_coeffs_updated = true;
}
}
PERF_TRACE();
@ -1072,7 +1123,6 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
bool bs_coeffs_updated = false;
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
@ -1132,7 +1182,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, &bs_coeffs_updated);
dpp401_dscl_program_isharp(dpp_base, scl_data);
return;
}
@ -1159,18 +1209,12 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
SCL_V_NUM_TAPS_C, v_num_taps_c,
SCL_H_NUM_TAPS_C, h_num_taps_c);
/* ISharp configuration
* - B&S coeffs are written to same coeff RAM as WB scaler coeffs
* - coeff RAM toggle is in EASF programming
* - if we are only programming B&S coeffs, then need to reprogram
* WB scaler coeffs and toggle coeff RAM together
*/
//if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_isharp(dpp_base, scl_data, &bs_coeffs_updated);
dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr, bs_coeffs_updated);
dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr);
/* Edge adaptive scaler function configuration */
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_easf(dpp_base, scl_data);
/* isharp configuration */
//if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_isharp(dpp_base, scl_data);
PERF_TRACE();
}

View File

@ -76,9 +76,6 @@
#include "dml2/dml2_wrapper.h"
#include "spl/dc_spl_scl_easf_filters.h"
#include "spl/dc_spl_isharp_filters.h"
#define DC_LOGGER_INIT(logger)
enum dcn401_clk_src_array_id {
@ -2126,10 +2123,6 @@ static bool dcn401_resource_construct(
dc->dml2_options.max_segments_per_hubp = 20;
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
/* SPL */
spl_init_easf_filter_coeffs();
spl_init_blur_scale_coeffs();
return true;
create_fail:

View File

@ -23,7 +23,7 @@
# Makefile for the 'spl' sub-component of DAL.
# It provides the scaling library interface.
SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o
SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_filters_old.o dc_spl_isharp_filters.o
AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))

File diff suppressed because it is too large Load Diff

View File

@ -1,15 +0,0 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_filters.h"
void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
uint16_t *s1_12_filter, int num_taps)
{
int num_entries = NUM_PHASES_COEFF * num_taps;
int i;
for (i = 0; i < num_entries; i++)
*(s1_12_filter + i) = *(s1_10_filter + i) * 4;
}

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: MIT */
/* Copyright 2024 Advanced Micro Devices, Inc. */
#ifndef __DC_SPL_FILTERS_H__
#define __DC_SPL_FILTERS_H__
#include "dc_spl_types.h"
#define NUM_PHASES_COEFF 33
void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
uint16_t *s1_12_filter, int num_taps);
#endif /* __DC_SPL_FILTERS_H__ */

View File

@ -2,9 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_types.h"
#include "spl_debug.h"
#include "dc_spl_filters.h"
#include "dc_spl_isharp_filters.h"
//========================================
@ -233,53 +230,6 @@ static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
0x080B0D0E,
0x00020406,
};
//========================================
// Delta Gain 1DLUT
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
// A_gain = 3.000000
// B_start = 11.000000
// B_end = 127.000000
// C_start = 40.000000
// C_end = 127.000000
//========================================
static const uint32_t filter_isharp_1D_lut_3p0x[32] = {
0x03010000,
0x0F0B0805,
0x211E1813,
0x2B292624,
0x3533302E,
0x3E3C3A37,
0x46444240,
0x4D4B4A48,
0x5352504F,
0x59575655,
0x5D5C5B5A,
0x61605F5E,
0x64646362,
0x66666565,
0x68686767,
0x68686868,
0x68686868,
0x67676868,
0x65656666,
0x62636464,
0x5E5F6061,
0x5A5B5C5D,
0x55565759,
0x4F505253,
0x484A4B4D,
0x40424446,
0x373A3C3E,
0x2E303335,
0x2426292B,
0x191B1E21,
0x0D101316,
0x0003060A,
};
//========================================
// Wide scaler coefficients
//========================================================
// <using> gen_scaler_coeffs.m
@ -334,7 +284,7 @@ static const uint16_t filter_isharp_wide_6tap_64p[198] = {
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = {
static const uint16_t filter_isharp_bs_4tap_64p[198] = {
0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000,
0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000,
0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000,
@ -369,228 +319,6 @@ static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = {
0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000,
0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000
};
//========================================================
// <using> gen_BlurScale_coeffs.m
// <date> 25-Apr-2022
// <num_taps> 4
// <num_phases> 64
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
static const uint16_t filter_isharp_bs_4tap_64p[132] = {
0x00E5, 0x0237, 0x00E4, 0x0000,
0x00DE, 0x0237, 0x00EB, 0x0000,
0x00D7, 0x0236, 0x00F2, 0x0001,
0x00D0, 0x0235, 0x00FA, 0x0001,
0x00C9, 0x0234, 0x0101, 0x0002,
0x00C2, 0x0233, 0x0108, 0x0003,
0x00BB, 0x0232, 0x0110, 0x0003,
0x00B5, 0x0230, 0x0117, 0x0004,
0x00AE, 0x022E, 0x011F, 0x0005,
0x00A8, 0x022C, 0x0126, 0x0006,
0x00A2, 0x022A, 0x012D, 0x0007,
0x009C, 0x0228, 0x0134, 0x0008,
0x0096, 0x0225, 0x013C, 0x0009,
0x0090, 0x0222, 0x0143, 0x000B,
0x008A, 0x021F, 0x014B, 0x000C,
0x0085, 0x021C, 0x0151, 0x000E,
0x007F, 0x0218, 0x015A, 0x000F,
0x007A, 0x0215, 0x0160, 0x0011,
0x0074, 0x0211, 0x0168, 0x0013,
0x006F, 0x020D, 0x016F, 0x0015,
0x006A, 0x0209, 0x0176, 0x0017,
0x0065, 0x0204, 0x017E, 0x0019,
0x0060, 0x0200, 0x0185, 0x001B,
0x005C, 0x01FB, 0x018C, 0x001D,
0x0057, 0x01F6, 0x0193, 0x0020,
0x0053, 0x01F1, 0x019A, 0x0022,
0x004E, 0x01EC, 0x01A1, 0x0025,
0x004A, 0x01E6, 0x01A8, 0x0028,
0x0046, 0x01E1, 0x01AF, 0x002A,
0x0042, 0x01DB, 0x01B6, 0x002D,
0x003F, 0x01D5, 0x01BB, 0x0031,
0x003B, 0x01CF, 0x01C2, 0x0034,
0x0037, 0x01C9, 0x01C9, 0x0037,
};
//========================================================
// <using> gen_BlurScale_coeffs.m
// <date> 09-Jun-2022
// <num_taps> 3
// <num_phases> 64
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
static const uint16_t filter_isharp_bs_3tap_64p[99] = {
0x0200, 0x0200, 0x0000,
0x01F6, 0x0206, 0x0004,
0x01EC, 0x020B, 0x0009,
0x01E2, 0x0211, 0x000D,
0x01D8, 0x0216, 0x0012,
0x01CE, 0x021C, 0x0016,
0x01C4, 0x0221, 0x001B,
0x01BA, 0x0226, 0x0020,
0x01B0, 0x022A, 0x0026,
0x01A6, 0x022F, 0x002B,
0x019C, 0x0233, 0x0031,
0x0192, 0x0238, 0x0036,
0x0188, 0x023C, 0x003C,
0x017E, 0x0240, 0x0042,
0x0174, 0x0244, 0x0048,
0x016A, 0x0248, 0x004E,
0x0161, 0x024A, 0x0055,
0x0157, 0x024E, 0x005B,
0x014D, 0x0251, 0x0062,
0x0144, 0x0253, 0x0069,
0x013A, 0x0256, 0x0070,
0x0131, 0x0258, 0x0077,
0x0127, 0x025B, 0x007E,
0x011E, 0x025C, 0x0086,
0x0115, 0x025E, 0x008D,
0x010B, 0x0260, 0x0095,
0x0102, 0x0262, 0x009C,
0x00F9, 0x0263, 0x00A4,
0x00F0, 0x0264, 0x00AC,
0x00E7, 0x0265, 0x00B4,
0x00DF, 0x0264, 0x00BD,
0x00D6, 0x0265, 0x00C5,
0x00CD, 0x0266, 0x00CD,
};
/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */
static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
struct scale_ratio_to_sharpness_level_lookup scale_to_sharp_sdr_nl[3][6] = {
{ /* LOW */
{1125, 1000, 75, 100},
{11, 10, 6, 10},
{1075, 1000, 45, 100},
{105, 100, 3, 10},
{1025, 1000, 15, 100},
{1, 1, 0, 1},
},
{ /* MID */
{1125, 1000, 2, 1},
{11, 10, 175, 100},
{1075, 1000, 15, 10},
{105, 100, 125, 100},
{1025, 1000, 1, 1},
{1, 1, 75, 100},
},
{ /* HIGH */
{1125, 1000, 35, 10},
{11, 10, 32, 10},
{1075, 1000, 29, 10},
{105, 100, 26, 10},
{1025, 1000, 23, 10},
{1, 1, 2, 1},
},
};
struct scale_ratio_to_sharpness_level_lookup scale_to_sharp_sdr_l[3][6] = {
{ /* LOW */
{1125, 1000, 75, 100},
{11, 10, 6, 10},
{1075, 1000, 45, 100},
{105, 100, 3, 10},
{1025, 1000, 15, 100},
{1, 1, 0, 1},
},
{ /* MID */
{1125, 1000, 15, 10},
{11, 10, 135, 100},
{1075, 1000, 12, 10},
{105, 100, 105, 100},
{1025, 1000, 9, 10},
{1, 1, 75, 100},
},
{ /* HIGH */
{1125, 1000, 25, 10},
{11, 10, 23, 10},
{1075, 1000, 21, 10},
{105, 100, 19, 10},
{1025, 1000, 17, 10},
{1, 1, 15, 10},
},
};
struct scale_ratio_to_sharpness_level_lookup scale_to_sharp_hdr_nl[3][6] = {
{ /* LOW */
{1125, 1000, 5, 10},
{11, 10, 4, 10},
{1075, 1000, 3, 10},
{105, 100, 2, 10},
{1025, 1000, 1, 10},
{1, 1, 0, 1},
},
{ /* MID */
{1125, 1000, 1, 1},
{11, 10, 9, 10},
{1075, 1000, 8, 10},
{105, 100, 7, 10},
{1025, 1000, 6, 10},
{1, 1, 5, 10},
},
{ /* HIGH */
{1125, 1000, 15, 10},
{11, 10, 14, 10},
{1075, 1000, 13, 10},
{105, 100, 12, 10},
{1025, 1000, 11, 10},
{1, 1, 1, 1},
},
};
struct scale_ratio_to_sharpness_level_lookup scale_to_sharp_hdr_l[3][6] = {
{ /* LOW */
{1125, 1000, 75, 100},
{11, 10, 6, 10},
{1075, 1000, 45, 100},
{105, 100, 3, 10},
{1025, 1000, 15, 100},
{1, 1, 0, 1},
},
{ /* MID */
{1125, 1000, 15, 10},
{11, 10, 135, 100},
{1075, 1000, 12, 10},
{105, 100, 105, 100},
{1025, 1000, 9, 10},
{1, 1, 75, 100},
},
{ /* HIGH */
{1125, 1000, 25, 10},
{11, 10, 23, 10},
{1075, 1000, 21, 10},
{105, 100, 19, 10},
{1025, 1000, 17, 10},
{1, 1, 15, 10},
},
};
/* Pre-generated 1DLUT for LOW for given setup and sharpness level */
uint32_t filter_isharp_1D_lut_pregen[3][32] = {
{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
},
{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
},
{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
},
};
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
return filter_isharp_1D_lut_0;
@ -611,160 +339,11 @@ const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
{
return filter_isharp_1D_lut_2p0x;
}
const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
{
return filter_isharp_1D_lut_3p0x;
}
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
{
return filter_isharp_wide_6tap_64p;
}
uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
{
return filter_isharp_bs_4tap_in_6_64p_s1_12;
return filter_isharp_bs_4tap_64p;
}
uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
{
return filter_isharp_bs_4tap_64p_s1_12;
}
uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
{
return filter_isharp_bs_3tap_64p_s1_12;
}
void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup)
{
uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst;
struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level, ratio_level;
int i, j;
struct scale_ratio_to_sharpness_level_lookup *setup_lookup_ptr;
int num_sharp_ramp_levels;
int size_1dlut;
int sharp_calc_int;
uint32_t filter_pregen_store[32];
/*
* Given scaling ratio and current system setup, build pregenerated
* 1DLUT tables for three sharpness levels - LOW, MID, HIGH
*/
for (i = 0; i < 3; i++) {
/*
* Based on setup ( HDR/SDR, L/NL ), get base scale ratio to
* sharpness curve
*/
switch (setup) {
case HDR_L:
setup_lookup_ptr = scale_to_sharp_hdr_l[i];
num_sharp_ramp_levels = sizeof(scale_to_sharp_hdr_l[i])/
sizeof(struct scale_ratio_to_sharpness_level_lookup);
break;
case HDR_NL:
setup_lookup_ptr = scale_to_sharp_hdr_nl[i];
num_sharp_ramp_levels = sizeof(scale_to_sharp_hdr_nl[i])/
sizeof(struct scale_ratio_to_sharpness_level_lookup);
break;
case SDR_L:
setup_lookup_ptr = scale_to_sharp_sdr_l[i];
num_sharp_ramp_levels = sizeof(scale_to_sharp_sdr_l[i])/
sizeof(struct scale_ratio_to_sharpness_level_lookup);
break;
case SDR_NL:
default:
setup_lookup_ptr = scale_to_sharp_sdr_nl[i];
num_sharp_ramp_levels = sizeof(scale_to_sharp_sdr_nl[i])/
sizeof(struct scale_ratio_to_sharpness_level_lookup);
break;
}
/*
* Compare desired scaling ratio and find adjusted sharpness from
* base scale ratio to sharpness curve
*/
j = 0;
sharp_level = spl_fixpt_zero;
while (j < num_sharp_ramp_levels) {
ratio_level = spl_fixpt_from_fraction(setup_lookup_ptr->ratio_numer,
setup_lookup_ptr->ratio_denom);
if (ratio.value >= ratio_level.value) {
sharp_level = spl_fixpt_from_fraction(setup_lookup_ptr->sharpness_numer,
setup_lookup_ptr->sharpness_denom);
break;
}
setup_lookup_ptr++;
j++;
}
/*
* Calculate LUT_128_gained with this equation:
*
* LUT_128_gained[i] = (uint8)(0.5 + min(255,(double)(LUT_128[i])*sharpLevel/iGain))
* where LUT_128[i] is contents of 3p0x isharp 1dlut
* where sharpLevel is desired sharpness level
* where iGain is base sharpness level 3.0
* where LUT_128_gained[i] is adjusted 1dlut value based on desired sharpness level
*/
byte_ptr_1dlut_src = (uint8_t *)filter_isharp_1D_lut_3p0x;
byte_ptr_1dlut_dst = (uint8_t *)filter_pregen_store;
size_1dlut = sizeof(filter_isharp_1D_lut_3p0x);
memset(byte_ptr_1dlut_dst, 0, size_1dlut);
for (j = 0; j < size_1dlut; j++) {
sharp_base = spl_fixpt_from_int((int)*byte_ptr_1dlut_src);
sharp_calc = spl_fixpt_mul(sharp_base, sharp_level);
sharp_calc = spl_fixpt_div(sharp_calc, spl_fixpt_from_int(3));
sharp_calc = spl_fixpt_min(spl_fixpt_from_int(255), sharp_calc);
sharp_calc = spl_fixpt_add(sharp_calc, spl_fixpt_from_fraction(1, 2));
sharp_calc_int = spl_fixpt_floor(sharp_calc);
if (sharp_calc_int > 255)
sharp_calc_int = 255;
*byte_ptr_1dlut_dst = (uint8_t)sharp_calc_int;
byte_ptr_1dlut_src++;
byte_ptr_1dlut_dst++;
}
/* Compare if filter has change, if so update */
if (memcmp((void *)filter_isharp_1D_lut_pregen[i], (void *)filter_pregen_store, size_1dlut) != 0)
memcpy((void *)filter_isharp_1D_lut_pregen[i], (void *)filter_pregen_store, size_1dlut);
}
}
uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum explicit_sharpness sharpness)
{
return filter_isharp_1D_lut_pregen[sharpness];
}
void spl_init_blur_scale_coeffs(void)
{
convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
filter_isharp_bs_3tap_64p_s1_12, 3);
convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
filter_isharp_bs_4tap_64p_s1_12, 4);
convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
}
uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
{
if (taps == 3)
return spl_get_filter_isharp_bs_3tap_64p();
else if (taps == 4)
return spl_get_filter_isharp_bs_4tap_64p();
else if (taps == 6)
return spl_get_filter_isharp_bs_4tap_in_6_64p();
else {
/* should never happen, bug */
SPL_BREAK_TO_DEBUGGER();
return NULL;
}
}
void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data)
{
dscl_prog_data->filter_blur_scale_h =
spl_dscl_get_blur_scale_coeffs_64p(data->taps.h_taps);
dscl_prog_data->filter_blur_scale_v =
spl_dscl_get_blur_scale_coeffs_64p(data->taps.v_taps);
}

View File

@ -12,37 +12,6 @@ const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
struct scale_ratio_to_sharpness_level_lookup {
unsigned int ratio_numer;
unsigned int ratio_denom;
unsigned int sharpness_numer;
unsigned int sharpness_denom;
};
struct sharpness_level_mapping {
unsigned int level;
unsigned int level_numer;
unsigned int level_denom;
};
enum system_setup {
SDR_NL = 0,
SDR_L,
HDR_NL,
HDR_L
};
void spl_init_blur_scale_coeffs(void);
void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data);
void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup);
uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum explicit_sharpness sharpness);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,38 +0,0 @@
/* SPDX-License-Identifier: MIT */
/* Copyright 2024 Advanced Micro Devices, Inc. */
#ifndef __DC_SPL_SCL_EASF_FILTERS_H__
#define __DC_SPL_SCL_EASF_FILTERS_H__
#include "dc_spl_types.h"
struct scale_ratio_to_reg_value_lookup {
int numer;
int denom;
const uint32_t reg_value;
};
void spl_init_easf_filter_coeffs(void);
uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data, bool enable_easf_v,
bool enable_easf_h);
uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio);
uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio);
uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */

View File

@ -2,8 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_types.h"
#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
//=========================================
// <num_taps> = 2
@ -1319,97 +1317,97 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_3tap_16p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_3tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_4tap_16p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_4tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_5tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_6tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_7tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_8tap_64p_116;
else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
@ -1424,29 +1422,3 @@ const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
/*
 * Select the 64-phase scaler coefficient table for the given tap count
 * and scaling ratio. Returns NULL for 1 tap (bypass) and, after
 * signalling the debugger, for any unsupported tap count.
 */
const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
{
	switch (taps) {
	case 8:
		return spl_get_filter_8tap_64p(ratio);
	case 7:
		return spl_get_filter_7tap_64p(ratio);
	case 6:
		return spl_get_filter_6tap_64p(ratio);
	case 5:
		return spl_get_filter_5tap_64p(ratio);
	case 4:
		return spl_get_filter_4tap_64p(ratio);
	case 3:
		return spl_get_filter_3tap_64p(ratio);
	case 2:
		return spl_get_filter_2tap_64p();
	case 1:
		return NULL;
	default:
		/* should never happen, bug */
		SPL_BREAK_TO_DEBUGGER();
		return NULL;
	}
}

View File

@ -7,16 +7,53 @@
#include "dc_spl_types.h"
const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio);
const uint16_t *spl_get_filter_2tap_16p(void);
const uint16_t *spl_get_filter_2tap_64p(void);
const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_3tap_16p_upscale(void);
const uint16_t *spl_get_filter_3tap_16p_116(void);
const uint16_t *spl_get_filter_3tap_16p_149(void);
const uint16_t *spl_get_filter_3tap_16p_183(void);
const uint16_t *spl_get_filter_4tap_16p_upscale(void);
const uint16_t *spl_get_filter_4tap_16p_116(void);
const uint16_t *spl_get_filter_4tap_16p_149(void);
const uint16_t *spl_get_filter_4tap_16p_183(void);
const uint16_t *spl_get_filter_3tap_64p_upscale(void);
const uint16_t *spl_get_filter_3tap_64p_116(void);
const uint16_t *spl_get_filter_3tap_64p_149(void);
const uint16_t *spl_get_filter_3tap_64p_183(void);
const uint16_t *spl_get_filter_4tap_64p_upscale(void);
const uint16_t *spl_get_filter_4tap_64p_116(void);
const uint16_t *spl_get_filter_4tap_64p_149(void);
const uint16_t *spl_get_filter_4tap_64p_183(void);
const uint16_t *spl_get_filter_5tap_64p_upscale(void);
const uint16_t *spl_get_filter_5tap_64p_116(void);
const uint16_t *spl_get_filter_5tap_64p_149(void);
const uint16_t *spl_get_filter_5tap_64p_183(void);
const uint16_t *spl_get_filter_6tap_64p_upscale(void);
const uint16_t *spl_get_filter_6tap_64p_116(void);
const uint16_t *spl_get_filter_6tap_64p_149(void);
const uint16_t *spl_get_filter_6tap_64p_183(void);
const uint16_t *spl_get_filter_7tap_64p_upscale(void);
const uint16_t *spl_get_filter_7tap_64p_116(void);
const uint16_t *spl_get_filter_7tap_64p_149(void);
const uint16_t *spl_get_filter_7tap_64p_183(void);
const uint16_t *spl_get_filter_8tap_64p_upscale(void);
const uint16_t *spl_get_filter_8tap_64p_116(void);
const uint16_t *spl_get_filter_8tap_64p_149(void);
const uint16_t *spl_get_filter_8tap_64p_183(void);
#endif /* __DC_SPL_SCL_FILTERS_H__ */

View File

@ -0,0 +1,25 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

View File

@ -2,15 +2,14 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
#include "os_types.h" // swap
#ifndef ASSERT
#define ASSERT(_bool) ((void *)0)
#endif
#include "include/fixed31_32.h" // fixed31_32 and related functions
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
#include "spl_os_types.h" // swap
#ifndef SPL_ASSERT
#define SPL_ASSERT(_bool) ((void *)0)
#endif
#include "spl_fixpt31_32.h" // fixed31_32 and related functions
enum lb_memory_config {
/* Enable all 3 pieces of memory */
LB_MEMORY_CONFIG_0 = 0,
@ -39,16 +38,16 @@ struct spl_rect {
};
struct spl_ratios {
struct spl_fixed31_32 horz;
struct spl_fixed31_32 vert;
struct spl_fixed31_32 horz_c;
struct spl_fixed31_32 vert_c;
struct fixed31_32 horz;
struct fixed31_32 vert;
struct fixed31_32 horz_c;
struct fixed31_32 vert_c;
};
struct spl_inits {
struct spl_fixed31_32 h;
struct spl_fixed31_32 h_c;
struct spl_fixed31_32 v;
struct spl_fixed31_32 v_c;
struct fixed31_32 h;
struct fixed31_32 h_c;
struct fixed31_32 v;
struct fixed31_32 v_c;
};
struct spl_taps {
@ -81,8 +80,6 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
SPL_PIXEL_FORMAT_INVALID,
SPL_PIXEL_FORMAT_422BPP8,
SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
@ -138,7 +135,6 @@ struct spl_scaler_data {
struct spl_rect viewport_c;
struct spl_rect recout;
struct spl_ratios ratios;
struct spl_ratios recip_ratios;
struct spl_inits inits;
};
@ -408,16 +404,11 @@ struct dscl_prog_data {
const uint16_t *filter_blur_scale_h;
};
/* SPL input and output definitions */
// SPL scratch struct
struct spl_scratch {
// Pack all SPL outputs in scl_data
struct spl_scaler_data scl_data;
};
/* SPL input and output definitions */
// SPL outputs struct
struct spl_out {
// Pack all SPL outputs in scl_data
struct spl_scaler_data scl_data;
// Pack all output need to program hw registers
struct dscl_prog_data *dscl_prog_data;
};
@ -500,10 +491,6 @@ struct spl_in {
bool prefer_easf;
bool disable_easf;
struct spl_debug debug;
bool is_fullscreen;
bool is_hdr_on;
int h_active;
int v_active;
};
// end of SPL inputs

View File

@ -1,23 +0,0 @@
/* Copyright © 1997-2004 Advanced Micro Devices, Inc. All rights reserved. */
#ifndef SPL_DEBUG_H
#define SPL_DEBUG_H
#ifdef SPL_ASSERT
#undef SPL_ASSERT
#endif
#define SPL_ASSERT(b)
#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0)
#ifdef SPL_DALMSG
#undef SPL_DALMSG
#endif
#define SPL_DALMSG(b)
#ifdef SPL_DAL_ASSERT_MSG
#undef SPL_DAL_ASSERT_MSG
#endif
#define SPL_DAL_ASSERT_MSG(b, m)
#endif // SPL_DEBUG_H

View File

@ -1,518 +0,0 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "spl_fixpt31_32.h"
static const struct spl_fixed31_32 spl_fixpt_two_pi = { 26986075409LL };
static const struct spl_fixed31_32 spl_fixpt_ln2 = { 2977044471LL };
static const struct spl_fixed31_32 spl_fixpt_ln2_div_2 = { 1488522236LL };
/*
 * Magnitude of a signed 64-bit value as an unsigned 64-bit value.
 * The zero/negative path computes -arg, matching the original logic
 * exactly (including the arg == 0 case, which yields 0).
 */
static inline unsigned long long abs_i64(
	long long arg)
{
	return (arg > 0) ? (unsigned long long)arg
			 : (unsigned long long)(-arg);
}
/*
 * @brief
 * result = dividend / divisor
 * *remainder = dividend % divisor
 *
 * Thin wrapper over the platform 64-bit divide helper; asserts a
 * non-zero divisor before dividing.
 */
static inline unsigned long long complete_integer_division_u64(
	unsigned long long dividend,
	unsigned long long divisor,
	unsigned long long *remainder)
{
	unsigned long long result;
	ASSERT(divisor);
	result = spl_div64_u64_rem(dividend, divisor, remainder);
	return result;
}
#define FRACTIONAL_PART_MASK \
((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
#define GET_INTEGER_PART(x) \
((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
#define GET_FRACTIONAL_PART(x) \
(FRACTIONAL_PART_MASK & (x))
/*
 * Build an s31.32 fixed-point value equal to numerator / denominator.
 * Signs are handled separately so the division runs on unsigned
 * magnitudes; the result is rounded to nearest in the last bit.
 */
struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator)
{
	struct spl_fixed31_32 res;
	bool arg1_negative = numerator < 0;
	bool arg2_negative = denominator < 0;
	/* work on magnitudes; the sign is re-applied at the end */
	unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
	unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
	unsigned long long remainder;
	/* determine integer part */
	unsigned long long res_value = complete_integer_division_u64(
		arg1_value, arg2_value, &remainder);
	ASSERT(res_value <= LONG_MAX);
	/* determine fractional part: long division, one result bit per step */
	{
		unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
		do {
			remainder <<= 1;
			res_value <<= 1;
			if (remainder >= arg2_value) {
				res_value |= 1;
				remainder -= arg2_value;
			}
		} while (--i != 0);
	}
	/* round up LSB when the leftover remainder is >= half the divisor */
	{
		unsigned long long summand = (remainder << 1) >= arg2_value;
		ASSERT(res_value <= LLONG_MAX - summand);
		res_value += summand;
	}
	res.value = (long long)res_value;
	/* result is negative iff exactly one operand was negative */
	if (arg1_negative ^ arg2_negative)
		res.value = -res.value;
	return res;
}
/*
 * Fixed-point multiply: result = arg1 * arg2.
 * Each operand is split into 32-bit integer and fractional halves so the
 * four partial products fit in 64 bits; the fra*fra term keeps only its
 * top 32 bits, rounded to nearest.
 */
struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	struct spl_fixed31_32 res;
	bool arg1_negative = arg1.value < 0;
	bool arg2_negative = arg2.value < 0;
	unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
	unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
	unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
	unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
	unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
	unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
	unsigned long long tmp;
	res.value = arg1_int * arg2_int;
	ASSERT(res.value <= (long long)LONG_MAX);
	res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
	tmp = arg1_int * arg2_fra;
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	tmp = arg2_int * arg1_fra;
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	tmp = arg1_fra * arg2_fra;
	/* keep the top 32 bits of fra*fra, rounding to nearest */
	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
		(tmp >= (unsigned long long)spl_fixpt_half.value);
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	if (arg1_negative ^ arg2_negative)
		res.value = -res.value;
	return res;
}
/*
 * Fixed-point square: result = arg * arg.
 * Same partial-product scheme as spl_fixpt_mul; the int*fra product is
 * deliberately added twice to account for the 2*a*b cross term of
 * (a + b)^2.
 */
struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
{
	struct spl_fixed31_32 res;
	unsigned long long arg_value = abs_i64(arg.value);
	unsigned long long arg_int = GET_INTEGER_PART(arg_value);
	unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
	unsigned long long tmp;
	res.value = arg_int * arg_int;
	ASSERT(res.value <= (long long)LONG_MAX);
	res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
	tmp = arg_int * arg_fra;
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	/* second add of the same term: cross term appears twice */
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	tmp = arg_fra * arg_fra;
	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
		(tmp >= (unsigned long long)spl_fixpt_half.value);
	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
	res.value += tmp;
	return res;
}
/*
 * Reciprocal: result = 1 / arg. Asserts on a zero argument; no other
 * special-case handling.
 */
struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
{
	/*
	 * @note
	 * Good idea to use Newton's method
	 */
	ASSERT(arg.value);
	return spl_fixpt_from_fraction(
		spl_fixpt_one.value,
		arg.value);
}
/*
 * sinc(arg) = sin(arg) / arg via a truncated Taylor series (n = 27).
 * The argument is first reduced modulo 2*pi; if reduction happened, the
 * result is rescaled by arg_norm / arg so it still equals sin(arg)/arg.
 */
struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg)
{
	struct spl_fixed31_32 square;
	struct spl_fixed31_32 res = spl_fixpt_one;
	int n = 27;
	struct spl_fixed31_32 arg_norm = arg;
	/* reduce |arg| into the [-2pi, 2pi] range */
	if (spl_fixpt_le(
		spl_fixpt_two_pi,
		spl_fixpt_abs(arg))) {
		arg_norm = spl_fixpt_sub(
			arg_norm,
			spl_fixpt_mul_int(
				spl_fixpt_two_pi,
				(int)spl_div64_s64(
					arg_norm.value,
					spl_fixpt_two_pi.value)));
	}
	square = spl_fixpt_sqr(arg_norm);
	/* Horner-style evaluation of the alternating Taylor series */
	do {
		res = spl_fixpt_sub(
			spl_fixpt_one,
			spl_fixpt_div_int(
				spl_fixpt_mul(
					square,
					res),
				n * (n - 1)));
		n -= 2;
	} while (n > 2);
	/* undo the range-reduction scaling when it was applied */
	if (arg.value != arg_norm.value)
		res = spl_fixpt_div(
			spl_fixpt_mul(res, arg_norm),
			arg);
	return res;
}
/*
 * sin(arg) = arg * sinc(arg); argument in radians.
 */
struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg)
{
	return spl_fixpt_mul(
		arg,
		spl_fixpt_sinc(arg));
}
/*
 * cos(arg) via a truncated Taylor series (n = 26). The argument must
 * already lie in [-2pi, 2pi]; no range reduction is performed here.
 */
struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg)
{
	/* TODO implement argument normalization */
	const struct spl_fixed31_32 square = spl_fixpt_sqr(arg);
	struct spl_fixed31_32 res = spl_fixpt_one;
	int n = 26;
	do {
		res = spl_fixpt_sub(
			spl_fixpt_one,
			spl_fixpt_div_int(
				spl_fixpt_mul(
					square,
					res),
				n * (n - 1)));
		n -= 2;
	} while (n != 0);
	return res;
}
/*
 * @brief
 * result = exp(arg),
 * where abs(arg) < 1
 *
 * Calculated as Taylor series.
 */
static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
{
	unsigned int n = 9;
	/* seed with the tail ratio (n+2)/(n+1) of the truncated series */
	struct spl_fixed31_32 res = spl_fixpt_from_fraction(
		n + 2,
		n + 1);
	/* TODO find correct res */
	ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
	/* Horner evaluation: res = 1 + arg*res/n, for n = 9 down to 2 */
	do
		res = spl_fixpt_add(
			spl_fixpt_one,
			spl_fixpt_div_int(
				spl_fixpt_mul(
					arg,
					res),
				n));
	while (--n != 1);
	return spl_fixpt_add(
		spl_fixpt_one,
		spl_fixpt_mul(
			arg,
			res));
}
struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
{
	/*
	 * @brief
	 * Main equation is:
	 * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
	 * where m = round(x / ln(2)), r = x - m * ln(2)
	 */
	if (spl_fixpt_le(
		spl_fixpt_ln2_div_2,
		spl_fixpt_abs(arg))) {
		int m = spl_fixpt_round(
			spl_fixpt_div(
				arg,
				spl_fixpt_ln2));
		struct spl_fixed31_32 r = spl_fixpt_sub(
			arg,
			spl_fixpt_mul_int(
				spl_fixpt_ln2,
				m));
		ASSERT(m != 0);
		/* reduced argument must be inside the Taylor-series domain */
		ASSERT(spl_fixpt_lt(
			spl_fixpt_abs(r),
			spl_fixpt_one));
		if (m > 0)
			return spl_fixpt_shl(
				fixed31_32_exp_from_taylor_series(r),
				(unsigned char)m);
		else
			return spl_fixpt_div_int(
				fixed31_32_exp_from_taylor_series(r),
				1LL << -m);
	} else if (arg.value != 0)
		return fixed31_32_exp_from_taylor_series(arg);
	else
		return spl_fixpt_one;
}
/*
 * Natural logarithm via Newton iteration on f(x) = exp(x) - arg:
 * res1 = res - 1 + arg / exp(res), repeated until the step is tiny
 * (|error| <= 100 raw LSBs).
 */
struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
{
	struct spl_fixed31_32 res = spl_fixpt_neg(spl_fixpt_one);
	/* TODO improve 1st estimation */
	struct spl_fixed31_32 error;
	ASSERT(arg.value > 0);
	/* TODO if arg is negative, return NaN */
	/* TODO if arg is zero, return -INF */
	do {
		struct spl_fixed31_32 res1 = spl_fixpt_add(
			spl_fixpt_sub(
				res,
				spl_fixpt_one),
			spl_fixpt_div(
				arg,
				spl_fixpt_exp(res)));
		error = spl_fixpt_sub(
			res,
			res1);
		res = res1;
		/* TODO determine max_allowed_error based on quality of exp() */
	} while (abs_i64(error.value) > 100ULL);
	return res;
}
/* this function is a generic helper to translate fixed point value to
 * specified integer format that will consist of integer_bits integer part and
 * fractional_bits fractional part. For example it is used in
 * spl_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional
 * part in 32 bits. It is used in hw programming (scaler)
 */
static inline unsigned int ux_dy(
	long long value,
	unsigned int integer_bits,
	unsigned int fractional_bits)
{
	/* 1. create mask of integer part */
	unsigned int result = (1 << integer_bits) - 1;
	/* 2. mask out fractional part */
	unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
	/* 3. shrink fixed point integer part to be of integer_bits width*/
	result &= GET_INTEGER_PART(value);
	/* 4. make space for fractional part to be filled in after integer */
	result <<= fractional_bits;
	/* 5. shrink fixed point fractional part to of fractional_bits width*/
	fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
	/* 6. merge the result */
	return result | fractional_part;
}
/*
 * Like ux_dy() but saturating: values whose integer part does not fit
 * the target width clamp to all-ones, and converted results below
 * min_clamp clamp up to min_clamp.
 */
static inline unsigned int clamp_ux_dy(
	long long value,
	unsigned int integer_bits,
	unsigned int fractional_bits,
	unsigned int min_clamp)
{
	unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
	if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
		return (1 << (integer_bits + fractional_bits)) - 1;
	else if (truncated_val > min_clamp)
		return truncated_val;
	else
		return min_clamp;
}
/* Hardware register formats: uX.dY = X integer bits, Y fractional bits. */
unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg)
{
	return ux_dy(arg.value, 4, 19);
}
unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg)
{
	return ux_dy(arg.value, 3, 19);
}
unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg)
{
	return ux_dy(arg.value, 2, 19);
}
unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg)
{
	return ux_dy(arg.value, 0, 19);
}
unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg)
{
	return clamp_ux_dy(arg.value, 0, 14, 1);
}
unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg)
{
	return clamp_ux_dy(arg.value, 0, 10, 1);
}
/* Signed variant: sign-magnitude encoding of the u4.19 conversion. */
int spl_fixpt_s4d19(struct spl_fixed31_32 arg)
{
	if (arg.value < 0)
		return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19);
	else
		return ux_dy(arg.value, 4, 19);
}
/*
 * Convert a uX.dY register value back into fixed31_32.
 * NOTE(review): the first shift already places both the integer and the
 * fractional bits; the mask-and-OR sequence below re-ORs integer bits
 * that are already set, which looks redundant — kept as-is, confirm
 * against hardware programming expectations.
 */
struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value,
	unsigned int integer_bits,
	unsigned int fractional_bits)
{
	struct spl_fixed31_32 fixpt_value = spl_fixpt_zero;
	struct spl_fixed31_32 fixpt_int_value = spl_fixpt_zero;
	long long frac_mask = ((long long)1 << (long long)integer_bits) - 1;
	fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
	frac_mask = frac_mask << fractional_bits;
	fixpt_int_value.value = value & frac_mask;
	fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
	fixpt_value.value |= fixpt_int_value.value;
	return fixpt_value;
}
/*
 * Build a fixed31_32 from separate integer and fractional register
 * fields. frac_value holds fractional_bits bits; the integer_bits
 * parameter is not used by this implementation.
 */
struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
	unsigned int frac_value,
	unsigned int integer_bits,
	unsigned int fractional_bits)
{
	struct spl_fixed31_32 fixpt_value = spl_fixpt_from_int(int_value);
	fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
	return fixpt_value;
}

View File

@ -1,546 +0,0 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __SPL_FIXED31_32_H__
#define __SPL_FIXED31_32_H__
#include "os_types.h"
#include "spl_os_types.h" // swap
#ifndef ASSERT
#define ASSERT(_bool) ((void *)0)
#endif
#ifndef LLONG_MAX
#define LLONG_MAX 9223372036854775807ll
#endif
#ifndef LLONG_MIN
#define LLONG_MIN (-LLONG_MAX - 1ll)
#endif
#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
#ifndef LLONG_MIN
#define LLONG_MIN (1LL<<63)
#endif
#ifndef LLONG_MAX
#define LLONG_MAX (-1LL>>1)
#endif
/*
* @brief
* Arithmetic operations on real numbers
* represented as fixed-point numbers.
* There are: 1 bit for sign,
* 31 bit for integer part,
* 32 bits for fractional part.
*
* @note
* Currently, overflows and underflows are asserted;
* no special result returned.
*/
/* s31.32 fixed point: 1 sign bit, 31 integer bits, 32 fractional bits. */
struct spl_fixed31_32 {
	long long value;
};
/*
 * @brief
 * Useful constants
 */
static const struct spl_fixed31_32 spl_fixpt_zero = { 0 };	/* 0.0 */
static const struct spl_fixed31_32 spl_fixpt_epsilon = { 1LL };	/* smallest step, 2^-32 */
static const struct spl_fixed31_32 spl_fixpt_half = { 0x80000000LL };	/* 0.5 */
static const struct spl_fixed31_32 spl_fixpt_one = { 0x100000000LL };	/* 1.0 */
/*
* @brief
* Initialization routines
*/
/*
* @brief
* result = numerator / denominator
*/
struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator);
/*
* @brief
* result = arg
*/
static inline struct spl_fixed31_32 spl_fixpt_from_int(int arg)
{
struct spl_fixed31_32 res;
res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
return res;
}
/*
* @brief
* Unary operators
*/
/*
* @brief
* result = -arg
*/
static inline struct spl_fixed31_32 spl_fixpt_neg(struct spl_fixed31_32 arg)
{
struct spl_fixed31_32 res;
res.value = -arg.value;
return res;
}
/*
* @brief
* result = abs(arg) := (arg >= 0) ? arg : -arg
*/
/* Absolute value: negate when negative, otherwise pass through. */
static inline struct spl_fixed31_32 spl_fixpt_abs(struct spl_fixed31_32 arg)
{
	return (arg.value < 0) ? spl_fixpt_neg(arg) : arg;
}
/*
 * @brief
 * Binary relational operators
 *
 * All comparisons reduce to comparing the raw 64-bit representations,
 * which preserves ordering for s31.32 values.
 */
/*
 * @brief
 * result = arg1 < arg2
 */
static inline bool spl_fixpt_lt(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return arg1.value < arg2.value;
}
/*
 * @brief
 * result = arg1 <= arg2
 */
static inline bool spl_fixpt_le(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return arg1.value <= arg2.value;
}
/*
 * @brief
 * result = arg1 == arg2
 */
static inline bool spl_fixpt_eq(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return arg1.value == arg2.value;
}
/*
* @brief
* result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
*/
/* Smaller of the two operands; returns arg1 on ties. */
static inline struct spl_fixed31_32 spl_fixpt_min(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return (arg1.value <= arg2.value) ? arg1 : arg2;
}
/*
* @brief
* result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
*/
/* Larger of the two operands; returns arg2 on ties. */
static inline struct spl_fixed31_32 spl_fixpt_max(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return (arg1.value <= arg2.value) ? arg2 : arg1;
}
/*
 * @brief
 * | min_value, when arg <= min_value
 * result = | arg, when min_value < arg < max_value
 * | max_value, when arg >= max_value
 *
 * No ordering check is done on min_value/max_value; callers must pass
 * min_value <= max_value.
 */
static inline struct spl_fixed31_32 spl_fixpt_clamp(
	struct spl_fixed31_32 arg,
	struct spl_fixed31_32 min_value,
	struct spl_fixed31_32 max_value)
{
	if (spl_fixpt_le(arg, min_value))
		return min_value;
	else if (spl_fixpt_le(max_value, arg))
		return max_value;
	else
		return arg;
}
/*
 * @brief
 * Binary shift operators
 */
/*
 * @brief
 * result = arg << shift
 *
 * Asserts that no significant bits would be shifted out (overflow).
 */
static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
{
	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
		((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
	arg.value = arg.value << shift;
	return arg;
}
/*
* @brief
* result = arg >> shift
*/
static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
{
bool negative = arg.value < 0;
if (negative)
arg.value = -arg.value;
arg.value = arg.value >> shift;
if (negative)
arg.value = -arg.value;
return arg;
}
/*
 * @brief
 * Binary additive operators
 */
/*
 * @brief
 * result = arg1 + arg2
 *
 * Asserts that the sum does not overflow the 64-bit representation.
 */
static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	struct spl_fixed31_32 res;
	ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
		((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
	res.value = arg1.value + arg2.value;
	return res;
}
/*
 * @brief
 * result = arg1 + arg2, with arg2 a plain integer
 */
static inline struct spl_fixed31_32 spl_fixpt_add_int(struct spl_fixed31_32 arg1, int arg2)
{
	return spl_fixpt_add(arg1, spl_fixpt_from_int(arg2));
}
/*
 * @brief
 * result = arg1 - arg2
 *
 * Asserts that the difference does not overflow the 64-bit representation.
 */
static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	struct spl_fixed31_32 res;
	ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
		((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
	res.value = arg1.value - arg2.value;
	return res;
}
/*
 * @brief
 * result = arg1 - arg2, with arg2 a plain integer
 */
static inline struct spl_fixed31_32 spl_fixpt_sub_int(struct spl_fixed31_32 arg1, int arg2)
{
	return spl_fixpt_sub(arg1, spl_fixpt_from_int(arg2));
}
/*
 * @brief
 * Binary multiplicative operators
 */
/*
 * @brief
 * result = arg1 * arg2
 */
struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2);
/*
 * @brief
 * result = arg1 * arg2, with arg2 a plain integer
 */
static inline struct spl_fixed31_32 spl_fixpt_mul_int(struct spl_fixed31_32 arg1, int arg2)
{
	return spl_fixpt_mul(arg1, spl_fixpt_from_int(arg2));
}
/*
 * @brief
 * result = square(arg) := arg * arg
 */
struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg);
/*
 * @brief
 * result = arg1 / arg2, with arg2 a plain integer divisor
 */
static inline struct spl_fixed31_32 spl_fixpt_div_int(struct spl_fixed31_32 arg1, long long arg2)
{
	return spl_fixpt_from_fraction(arg1.value, spl_fixpt_from_int((int)arg2).value);
}
/*
 * @brief
 * result = arg1 / arg2, both fixed point; raw values form the fraction
 */
static inline struct spl_fixed31_32 spl_fixpt_div(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	return spl_fixpt_from_fraction(arg1.value, arg2.value);
}
/*
* @brief
* Reciprocal function
*/
/*
* @brief
* result = reciprocal(arg) := 1 / arg
*
* @note
* No special actions taken in case argument is zero.
*/
struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg);
/*
* @brief
* Trigonometric functions
*/
/*
* @brief
* result = sinc(arg) := sin(arg) / arg
*
* @note
* Argument specified in radians,
* internally it's normalized to [-2pi...2pi] range.
*/
struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg);
/*
* @brief
* result = sin(arg)
*
* @note
* Argument specified in radians,
* internally it's normalized to [-2pi...2pi] range.
*/
struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg);
/*
* @brief
* result = cos(arg)
*
* @note
* Argument specified in radians
* and should be in [-2pi...2pi] range -
* passing arguments outside that range
* will cause incorrect result!
*/
struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg);
/*
* @brief
* Transcendent functions
*/
/*
* @brief
* result = exp(arg)
*
* @note
* Currently, function is verified for abs(arg) <= 1.
*/
struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg);
/*
* @brief
* result = log(arg)
*
* @note
* Currently, abs(arg) should be less than 1.
* No normalization is done.
* Currently, no special actions taken
* in case of invalid argument(s). Take care!
*/
struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg);
/*
* @brief
* Power function
*/
/*
 * @brief
 * result = pow(arg1, arg2), computed as exp(log(arg1) * arg2)
 *
 * @note
 * Currently, abs(arg1) should be less than 1. Take care!
 * pow(0, 0) is defined as 1; pow(0, y) is 0 for any other y.
 */
static inline struct spl_fixed31_32 spl_fixpt_pow(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
{
	if (arg1.value == 0)
		return arg2.value == 0 ? spl_fixpt_one : spl_fixpt_zero;
	return spl_fixpt_exp(
		spl_fixpt_mul(
			spl_fixpt_log(arg1),
			arg2));
}
/*
* @brief
* Rounding functions
*/
/*
* @brief
* result = floor(arg) := greatest integer lower than or equal to arg
*/
/*
 * Integer part of arg, truncated.
 * NOTE(review): for negative non-integer inputs this truncates toward
 * zero rather than toward negative infinity, despite the "floor" name —
 * behavior kept as-is.
 */
static inline int spl_fixpt_floor(struct spl_fixed31_32 arg)
{
	unsigned long long magnitude =
		(arg.value > 0) ? (unsigned long long)arg.value
				: (unsigned long long)(-arg.value);
	int truncated = (int)(magnitude >> FIXED31_32_BITS_PER_FRACTIONAL_PART);

	return (arg.value >= 0) ? truncated : -truncated;
}
/*
 * @brief
 * result = round(arg) := integer nearest to arg
 *
 * Implemented as trunc(|arg| + 0.5) with the sign re-applied, so halves
 * round away from zero.
 */
static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
{
	unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
	const long long summand = spl_fixpt_half.value;
	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
	arg_value += summand;
	if (arg.value >= 0)
		return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
	else
		return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
}
/*
 * @brief
 * result = ceil(arg) := lowest integer greater than or equal to arg
 *
 * Implemented as trunc(|arg| + (1 - epsilon)) with the sign re-applied.
 * NOTE(review): for negative non-integer inputs this rounds away from
 * zero (e.g. -1.5 -> -2), which is floor rather than ceil — confirm
 * callers expect this behavior.
 */
static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
{
	unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
	const long long summand = spl_fixpt_one.value -
		spl_fixpt_epsilon.value;
	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
	arg_value += summand;
	if (arg.value >= 0)
		return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
	else
		return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
}
/* the following two function are used in scaler hw programming to convert fixed
* point value to format 2 bits from integer part and 19 bits from fractional
* part. The same applies for u0d19, 0 bits from integer part and 19 bits from
* fractional
*/
unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg);
unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg);
unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg);
unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg);
unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg);
unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg);
int spl_fixpt_s4d19(struct spl_fixed31_32 arg);
/*
 * Keep only the top frac_bits fractional bits, truncating the magnitude
 * toward zero (sign is stripped before masking and restored after).
 * frac_bits above the representation width asserts and returns arg
 * unchanged.
 */
static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg, unsigned int frac_bits)
{
	bool negative = arg.value < 0;
	if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
		ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
		return arg;
	}
	if (negative)
		arg.value = -arg.value;
	arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
	if (negative)
		arg.value = -arg.value;
	return arg;
}
struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
unsigned int frac_value,
unsigned int integer_bits,
unsigned int fractional_bits);
#endif