/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
static void dpp3_enable_cm_block(
struct dpp *dpp_base)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
unsigned int cm_bypass_mode = 0;
// debug option: put CM in bypass mode
if (dpp_base->ctx->dc->debug.cm_in_bypass)
cm_bypass_mode = 1;
REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
enum dc_lut_mode mode = LUT_BYPASS; /* default for bypass/reserved states */
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
if (state_mode == 0)
mode = LUT_BYPASS;
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
mode = LUT_RAM_A;
else
mode = LUT_RAM_B;
}
return mode;
}
static void dpp3_program_gammcor_lut(
struct dpp *dpp_base,
const struct pwl_result_data *rgb,
uint32_t num,
bool is_ram_a)
{
uint32_t i;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
/*fill in the LUT with all base values to be used by pwl module
* HW auto increments the LUT index: back-to-back write
*/
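/* Added note (inferred from the writes below): CM_GAMCOR_LUT_WRITE_COLOR_MASK
 * gates which channel(s) the auto-incremented writes land in. The value 7 set
 * in dpp3_configure_gamcor_lut writes all three channels at once, while 4/2/1
 * below select red/green/blue individually, with the LUT index rewound to 0
 * between the per-channel passes.
 */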
if (is_rgb_equal(rgb, num)) {
for (i = 0 ; i < num; i++)
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
} else {
REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);
REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);
REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
}
}
static void dpp3_power_on_gamcor_lut(
struct dpp *dpp_base,
bool power_on)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
} else {
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
}
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on ? 0 : 1);
}
void dpp3_program_cm_dealpha(
struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_SET_2(CM_DEALPHA, 0,
CM_DEALPHA_EN, enable,
CM_DEALPHA_ABLND, additive_blending);
}
void dpp3_program_cm_bias(
struct dpp *dpp_base,
struct CM_bias_params *bias_params)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
CM_BIAS_Y_G, bias_params->cm_bias_y_g,
CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
}
static void dpp3_gamcor_reg_field(
struct dcn3_dpp *dpp,
struct dcn3_xfer_func_reg *reg)
{
reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;
reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}
static void dpp3_configure_gamcor_lut(
struct dpp *dpp_base,
bool is_ram_a)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
CM_GAMCOR_LUT_HOST_SEL, is_ram_a ? 0 : 1);
REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
}
bool dpp3_program_gamcor_lut(
struct dpp *dpp_base, const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
struct dcn3_xfer_func_reg gam_regs;
dpp3_enable_cm_block(dpp_base);
if (params == NULL) { //bypass if we have no pwl data
REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
dpp3_power_on_gamcor_lut(dpp_base, false);
return false;
}
dpp3_power_on_gamcor_lut(dpp_base, true);
REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);
current_mode = dpp30_get_gamcor_current(dpp_base);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
else
next_mode = LUT_RAM_A;
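/* The GAMCOR LUT is double buffered: program whichever RAM bank is not
 * currently driving the pipe, then flip CM_GAMCOR_SELECT at the end so the
 * new curve takes effect on the next frame.
 */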
dpp3_power_on_gamcor_lut(dpp_base, true);
dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_B) {
gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
//New registers in DCN3AG/DCN GAMCOR block
gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B);
gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G);
gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R);
gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
} else {
gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
//New registers in DCN3AG/DCN GAMCOR block
gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B);
gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G);
gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R);
gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
}
//get register fields
dpp3_gamcor_reg_field(dpp, &gam_regs);
//program register set for LUTA/LUTB
cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);
dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
next_mode == LUT_RAM_A);
//select Gamma LUT to use for next frame
REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
return true;
}
void dpp3_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
static void program_gamut_remap(
struct dcn3_dpp *dpp,
const uint16_t *regval,
int select)
{
uint16_t selection = 0;
struct color_matrices_reg gam_regs;
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
CM_GAMUT_REMAP_MODE, 0);
return;
}
switch (select) {
case GAMUT_REMAP_COEFF:
selection = 1;
break;
/*this corresponds to GAMUT_REMAP coefficients set B
*we don't have common coefficient sets in dcn3ag/dcn3
*/
case GAMUT_REMAP_COMA_COEFF:
selection = 2;
break;
default:
break;
}
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
if (select == GAMUT_REMAP_COEFF) {
gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
} else if (select == GAMUT_REMAP_COMA_COEFF) {
gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
}
//select coefficient set to use
REG_SET(
CM_GAMUT_REMAP_CONTROL, 0,
CM_GAMUT_REMAP_MODE, selection);
}
void dpp3_cm_set_gamut_remap(
struct dpp *dpp_base,
const struct dpp_grph_csc_adjustment *adjust)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
int i = 0;
int gamut_mode;
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
/* Bypass if type is bypass or hw */
program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
else {
struct fixed31_32 arr_matrix[12];
uint16_t arr_reg_val[12];
for (i = 0; i < 12; i++)
arr_matrix[i] = adjust->temperature_matrix[i];
convert_float_matrix(
arr_reg_val, arr_matrix, 12);
//current coefficient set in use
REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
if (gamut_mode == 0)
gamut_mode = 1; //use coefficient set A
else if (gamut_mode == 1)
gamut_mode = 2;
else
gamut_mode = 1;
//follow dcn2 approach for now - using only coefficient set A
program_gamut_remap(dpp, arr_reg_val, gamut_mode);
}
}
/* file: drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c (linux-master) */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dcn30_hwseq.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "dcn30_mpc.h"
#include "dcn30_dpp.h"
#include "dcn10/dcn10_cm_common.h"
#include "dcn30_cm_common.h"
#include "reg_helper.h"
#include "abm.h"
#include "clk_mgr.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dcn30_resource.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#define DC_LOGGER \
dc->ctx->logger
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
bool dcn30_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
struct pwl_params *blend_lut = NULL;
if (plane_state->blend_tf) {
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm3_helper_translate_curve_to_hw_format(
plane_state->blend_tf, &dpp_base->regamma_params, false);
blend_lut = &dpp_base->regamma_params;
}
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
return result;
}
static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = false;
int acquired_rmu = 0;
int mpcc_id_projected = 0;
const struct pwl_params *shaper_lut = NULL;
//get the shaper lut params
if (stream->func_shaper) {
if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
}
if (stream->lut3d_func &&
stream->lut3d_func->state.bits.initialized == 1 &&
stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux;
else if (stream->lut3d_func->state.bits.rmu_mux_num == 2)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
if (mpcc_id_projected != mpcc_id)
BREAK_TO_DEBUGGER();
/* find the reason why logical layer assigned a different
* mpcc_id into acquire_post_bldn_3dlut
*/
acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
stream->lut3d_func->state.bits.rmu_mux_num);
if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
BREAK_TO_DEBUGGER();
result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
stream->lut3d_func->state.bits.rmu_mux_num);
result = mpc->funcs->program_shaper(mpc, shaper_lut,
stream->lut3d_func->state.bits.rmu_mux_num);
} else {
// loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
}
return result;
}
bool dcn30_set_input_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
enum dc_transfer_func_predefined tf;
bool result = true;
struct pwl_params *params = NULL;
if (dpp_base == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
if (plane_state->in_transfer_func &&
plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
tf = plane_state->in_transfer_func->tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
if (plane_state->in_transfer_func) {
if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
params = &plane_state->in_transfer_func->pwl;
else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
&dpp_base->degamma_params, false))
params = &dpp_base->degamma_params;
}
result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) {
if (dpp_base->funcs->dpp_program_blnd_lut)
hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (dpp_base->funcs->dpp_program_shaper_lut &&
dpp_base->funcs->dpp_program_3dlut)
hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
}
return result;
}
bool dcn30_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (pipe_ctx->top_pipe == NULL) {
/*program rmu shaper and 3dlut in MPC*/
ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
params = &stream->out_transfer_func->pwl;
else if (pipe_ctx->stream->out_transfer_func->type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return ret;
}
static void dcn30_set_writeback(
struct dc *dc,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
struct mcif_wb *mcif_wb;
struct mcif_buf_params *mcif_buf_params;
ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
ASSERT(wb_info->wb_enabled);
ASSERT(wb_info->mpcc_inst >= 0);
ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count);
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
mcif_buf_params = &wb_info->mcif_buf_params;
/* set DWB MPC mux */
dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc,
wb_info->dwb_pipe_inst, wb_info->mpcc_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height);
mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
}
void dcn30_update_writeback(
struct dc *dc,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
struct dwbc *dwb;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
dcn30_set_writeback(dc, wb_info, context);
/* update DWB */
dwb->funcs->update(dwb, &wb_info->dwb_params);
}
bool dcn30_mmhubbub_warmup(
struct dc *dc,
unsigned int num_dwb,
struct dc_writeback_info *wb_info)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
struct mcif_warmup_params warmup_params = {0};
unsigned int i, i_buf;
/* make sure there is no active DWB enabled */
for (i = 0; i < num_dwb; i++) {
dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
/*can not do warmup while any dwb enabled*/
return false;
}
}
if (wb_info->mcif_warmup_params.p_vmid == 0)
return false;
/*check whether this is new interface: warmup big buffer once*/
if (wb_info->mcif_warmup_params.start_address.quad_part != 0 &&
wb_info->mcif_warmup_params.region_size != 0) {
/*mmhubbub is shared, so it does not matter which MCIF*/
mcif_wb = dc->res_pool->mcif_wb[0];
/*warmup a big chunk of VM buffer at once*/
warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part;
warmup_params.address_increment = wb_info->mcif_warmup_params.region_size;
warmup_params.region_size = wb_info->mcif_warmup_params.region_size;
warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid;
if (warmup_params.address_increment == 0)
warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
return true;
}
/*following is the original: warmup each DWB's mcif buffer*/
for (i = 0; i < num_dwb; i++) {
dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
/*warmup is for VM mode only*/
if (wb_info[i].mcif_buf_params.p_vmid == 0)
return false;
/* Warmup MCIF_WB */
for (i_buf = 0; i_buf < MCIF_BUF_COUNT; i_buf++) {
warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf];
warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height;
warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid;
mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
}
}
return true;
}
void dcn30_enable_writeback(
struct dc *dc,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
dwb->funcs->enable(dwb, &wb_info->dwb_params);
}
void dcn30_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
dwb = dc->res_pool->dwbc[dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d",\
__func__, dwb_pipe_inst);
/* disable DWB */
dwb->funcs->disable(dwb);
/* disable MCIF */
mcif_wb->funcs->disable_mcif(mcif_wb);
/* disable MPC DWB mux */
dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst);
}
void dcn30_program_all_writeback_pipes_in_tree(
struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context)
{
struct dc_writeback_info wb_info;
struct dwbc *dwb;
struct dc_stream_status *stream_status = NULL;
int i_wb, i_pipe, i_stream;
DC_LOG_DWB("%s", __func__);
ASSERT(stream);
for (i_stream = 0; i_stream < context->stream_count; i_stream++) {
if (context->streams[i_stream] == stream) {
stream_status = &context->stream_status[i_stream];
break;
}
}
ASSERT(stream_status);
ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb);
/* For each writeback pipe */
for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
/* copy writeback info to local non-const so mpcc_inst can be set */
wb_info = stream->writeback_info[i_wb];
if (wb_info.wb_enabled) {
/* get the MPCC instance for writeback_source_plane */
wb_info.mpcc_inst = -1;
for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
if (!pipe_ctx->plane_state)
continue;
if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
break;
}
}
if (wb_info.mpcc_inst == -1) {
/* Disable writeback pipe and disconnect from MPCC
* if source plane has been removed
*/
dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
continue;
}
ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
dc->hwss.update_writeback(dc, &wb_info, context);
} else {
/* Enable writeback pipe and connect to MPCC */
dc->hwss.enable_writeback(dc, &wb_info, context);
}
} else {
/* Disable writeback pipe and disconnect from MPCC */
dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
}
}
}
void dcn30_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (!dcb->funcs->is_accelerated_mode(dcb)) {
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
}
if (dc->debug.enable_mem_low_power.bits.dmcu) {
// Force ERAM to shutdown if DMCU is not enabled
if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
}
}
// Set default OPTC memory power states
if (dc->debug.enable_mem_low_power.bits.optc) {
// Shutdown when unassigned and light sleep in VBLANK
REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
}
if (dc->debug.enable_mem_low_power.bits.vga) {
// Power down VGA memory
REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
}
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
if (res_pool->dccg && res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else {
// Not all ASICs have DCCG sw component
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) {
/* Power up AND update implementation according to the
* required signal (which may be different from the
* default signal on connector).
*/
struct dc_link *link = dc->links[i];
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
}
}
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
* Otherwise, if taking control is not possible, we need to power
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
hws->funcs.init_pipes(dc, dc->current_state);
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
/* In headless boot cases, DIG may be turned
* on which causes HW/SW discrepancies.
* To avoid this, power down hardware on boot
* if DIG is turned on and seamless boot not enabled
*/
if (!dc->config.seamless_boot_edp_requested) {
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
dc->hwss.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
dc->hwss.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
dc->hwss.power_down) {
dc->hwss.power_down(dc);
break;
}
}
}
}
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->panel_cntl)
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
abms[i]->funcs->abm_init(abms[i], backlight);
}
/* power AFMT HDMI memory. TODO: may move to output disable/enable to save power */
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
//if softmax is enabled then hardmax will be set by a different call
if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
if (pipe_ctx == NULL)
return;
if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
{
bool is_hdmi_tmds;
bool is_dp;
ASSERT(pipe_ctx->stream);
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
else {
if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
}
}
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
bool enable = false;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
? dmdata_dp
: dmdata_hdmi;
/* if using dynamic meta, don't set up generic infopackets */
if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
enable = true;
}
if (!hubp)
return;
if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
return;
stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
hubp->inst, mode);
}
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
uint32_t tmr_delay = 0, tmr_scale = 0;
struct dc_cursor_attributes cursor_attr;
bool cursor_cache_enable = false;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
if (enable) {
if (dc->current_state) {
int i;
/* First, check no-memory-requests case */
for (i = 0; i < dc->current_state->stream_count; i++) {
if (dc->current_state->stream_status[i].plane_count)
/* Fail eligibility on a visible stream */
break;
}
if (i == dc->current_state->stream_count) {
/* Enable no-memory-requests case */
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
stream = dc->current_state->streams[0];
plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
if (stream && plane) {
cursor_cache_enable = stream->cursor_position.enable &&
plane->address.grph.cursor_cache_addr.quad_part;
cursor_attr = stream->cursor_attributes;
}
/*
* Second, check MALL eligibility
*
* single display only, single surface only, 8 and 16 bit formats only, no VM,
* do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
*
* TODO: When we implement multi-display, PSR displays will be allowed if there is
* a non-PSR display present, since in that case we can't do D0i3.2
*/
if (dc->current_state->stream_count == 1 &&
stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
dc->current_state->stream_status[0].plane_count == 1 &&
plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
plane->address.page_table_base.quad_part == 0 &&
dc->hwss.does_plane_fit_in_mall &&
dc->hwss.does_plane_fit_in_mall(dc, plane,
cursor_cache_enable ? &cursor_attr : NULL)) {
unsigned int v_total = stream->adjust.v_total_max ?
stream->adjust.v_total_max : stream->timing.v_total;
unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
100LL, (v_total * stream->timing.h_total));
/*
* one frame time in microsec:
* Delay_Us = 1000000 / refresh
* dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
*
* one frame time modified by 'additional timer percent' (p):
* Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
* = dynamic_delay_us * (1 + p / 100)
* = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
* = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
*
* formula for timer duration based on parameters, from regspec:
* dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
*
* dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
* (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
* MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
* = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
* = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
*
* need to round up the result of the division before the subtraction
*/
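/* Worked example (hypothetical numbers, not from the regspec): at
 * refresh = 60 Hz, stutter_period = 1000 us and
 * mall_additional_timer_percent = 0:
 *   numerator = (1000000 + 2 * 1000 * 60) * 100 = 112,000,000
 *   denom     = 60 * 6528                       = 391,680
 *   tmr_delay = ceil(112,000,000 / 391,680) - 64 = 286 - 64 = 222
 * 222 does not fit in 6 bits, so the loop below doubles denom twice
 * (tmr_scale = 2), giving tmr_delay = 72 - 64 = 8, i.e. roughly
 * 65.28 * (64 + 8) * 2^2 ~= 18.8 ms of hysteresis for an ~18.7 ms target.
 */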
unsigned int denom = refresh_hz * 6528;
unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
denom) - 64LL;
/* In some cases the stutter period is really big (tiny modes); in those
* cases MALL can't be enabled, so skip them to avoid an ASSERT().
*
* Checking whether stutter_period exceeds 1/10th of the frame time tells
* us whether we can actually meet the range of the hysteresis timer.
*/
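/* Example (hypothetical numbers): at 60 Hz the frame time is ~16667 us,
 * so any stutter_period above 100000 / 60 ~= 1666 us bails out here.
 */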
if (stutter_period > 100000/refresh_hz)
return false;
/* scale should be increased until it fits into 6 bits */
while (tmr_delay & ~0x3F) {
tmr_scale++;
if (tmr_scale > 3) {
/* Delay exceeds range of hysteresis timer */
ASSERT(false);
return false;
}
denom *= 2;
tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
denom) - 64LL;
}
/* Copy HW cursor */
if (cursor_cache_enable) {
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
cmd.mall.header.payload_bytes =
sizeof(cmd.mall) - sizeof(cmd.mall.header);
switch (cursor_attr.color_format) {
case CURSOR_MODE_MONO:
cmd.mall.cursor_bpp = 2;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cmd.mall.cursor_bpp = 32;
break;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cmd.mall.cursor_bpp = 64;
break;
}
cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
cmd.mall.cursor_copy_dst.quad_part =
(plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
cmd.mall.cursor_width = cursor_attr.width;
cmd.mall.cursor_height = cursor_attr.height;
cmd.mall.cursor_pitch = cursor_attr.pitch;
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Use copied cursor, and it's okay to not switch back */
cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
dc_stream_set_cursor_attributes(stream, &cursor_attr);
}
/* Enable MALL */
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
cmd.mall.tmr_delay = tmr_delay;
cmd.mall.tmr_scale = tmr_scale;
cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
}
/* No applicable optimizations */
return false;
}
/* Disable MALL */
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_DISALLOW;
cmd.mall.header.payload_bytes =
sizeof(cmd.mall) - sizeof(cmd.mall.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
{
// add meta size?
unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
unsigned int mall_size = dc->caps.mall_size_total;
unsigned int cursor_size = 0;
if (dc->debug.mall_size_override)
mall_size = 1024 * 1024 * dc->debug.mall_size_override;
if (cursor_attr) {
cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
switch (cursor_attr->color_format) {
case CURSOR_MODE_MONO:
cursor_size /= 2;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cursor_size *= 4;
break;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cursor_size *= 8;
break;
}
}
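/* Worked example (hypothetical numbers): a 3840-pitch, 2160-line ARGB8888
 * surface needs 3840 * 2160 * 4 ~= 33.2 MB; with max_cursor_size = 256 a
 * 32bpp cursor adds 256 * 256 * 4 = 256 KB, so the plane only fits if the
 * (possibly overridden) MALL size exceeds ~33.4 MB.
 */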
return (surface_size + cursor_size) < mall_size;
}
void dcn30_hardware_release(struct dc *dc)
{
bool subvp_in_use = false;
uint32_t i;
dc_dmub_srv_p_state_delegate(dc, false, NULL);
dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);
/* SubVP treated the same way as FPO. If driver disable and
* we are using a SubVP config, disable and force on DCN side
* to prevent P-State hang on driver enable.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
}
/* If pstate unsupported, or still supported
* by firmware, force it supported by dcn
*/
if (dc->current_state)
if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, true, true);
}
void dcn30_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum controller_dp_test_pattern test_pattern,
enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset)
{
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
}
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context)
{
bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
/* Any transition into an FPO config should disable MCLK switching first to avoid
* driver and FW P-State synchronization issues.
*/
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
dc->optimized_required = true;
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
}
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
/*
* enabled -> enabled: do not disable
* enabled -> disabled: disable
* disabled -> enabled: don't care
* disabled -> disabled: don't care
*/
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
/* After disabling P-State, restore the original value to ensure we get the correct P-State
* on the next optimize. */
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
unsigned int triggers = 0;
if (params->triggers.surface_update)
triggers |= 0x100;
if (params->triggers.cursor_update)
triggers |= 0x8;
if (params->triggers.force_trigger)
triggers |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
/* file: drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c (linux-master) */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "fixed31_32.h"
#include "resource.h"
#include "basics/conversion.h"
#include "dwb.h"
#include "dcn30_dwb.h"
#include "dcn30_cm_common.h"
#include "dcn10/dcn10_cm_common.h"
#define REG(reg)\
dwbc30->dwbc_regs->reg
#define CTX \
dwbc30->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
#define TO_DCN30_DWBC(dwbc_base) \
container_of(dwbc_base, struct dcn30_dwbc, base)
static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
struct dcn3_xfer_func_reg *reg)
{
reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_B;
reg->masks.field_region_end = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
}
/*program dwb ogam RAM A*/
static void dwb3_program_ogam_luta_settings(
struct dcn30_dwbc *dwbc30,
const struct pwl_params *params)
{
struct dcn3_xfer_func_reg gam_regs;
dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
gam_regs.start_cntl_b = REG(DWB_OGAM_RAMA_START_CNTL_B);
gam_regs.start_cntl_g = REG(DWB_OGAM_RAMA_START_CNTL_G);
gam_regs.start_cntl_r = REG(DWB_OGAM_RAMA_START_CNTL_R);
gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMA_START_BASE_CNTL_B);
gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMA_START_BASE_CNTL_G);
gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMA_START_BASE_CNTL_R);
gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMA_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMA_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMA_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMA_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMA_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMA_END_CNTL2_R);
gam_regs.offset_b = REG(DWB_OGAM_RAMA_OFFSET_B);
gam_regs.offset_g = REG(DWB_OGAM_RAMA_OFFSET_G);
gam_regs.offset_r = REG(DWB_OGAM_RAMA_OFFSET_R);
gam_regs.region_start = REG(DWB_OGAM_RAMA_REGION_0_1);
gam_regs.region_end = REG(DWB_OGAM_RAMA_REGION_32_33);
/*todo*/
cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
}
/*program dwb ogam RAM B*/
static void dwb3_program_ogam_lutb_settings(
struct dcn30_dwbc *dwbc30,
const struct pwl_params *params)
{
struct dcn3_xfer_func_reg gam_regs;
dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
gam_regs.start_cntl_b = REG(DWB_OGAM_RAMB_START_CNTL_B);
gam_regs.start_cntl_g = REG(DWB_OGAM_RAMB_START_CNTL_G);
gam_regs.start_cntl_r = REG(DWB_OGAM_RAMB_START_CNTL_R);
gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMB_START_BASE_CNTL_B);
gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMB_START_BASE_CNTL_G);
gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMB_START_BASE_CNTL_R);
gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMB_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMB_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMB_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMB_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMB_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMB_END_CNTL2_R);
gam_regs.offset_b = REG(DWB_OGAM_RAMB_OFFSET_B);
gam_regs.offset_g = REG(DWB_OGAM_RAMB_OFFSET_G);
gam_regs.offset_r = REG(DWB_OGAM_RAMB_OFFSET_R);
gam_regs.region_start = REG(DWB_OGAM_RAMB_REGION_0_1);
gam_regs.region_end = REG(DWB_OGAM_RAMB_REGION_32_33);
cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
}
static enum dc_lut_mode dwb3_get_ogam_current(
struct dcn30_dwbc *dwbc30)
{
enum dc_lut_mode mode;
uint32_t state_mode;
uint32_t ram_select;
REG_GET_2(DWB_OGAM_CONTROL,
DWB_OGAM_MODE_CURRENT, &state_mode,
DWB_OGAM_SELECT_CURRENT, &ram_select);
if (state_mode == 0) {
mode = LUT_BYPASS;
} else if (state_mode == 2) {
if (ram_select == 0)
mode = LUT_RAM_A;
else if (ram_select == 1)
mode = LUT_RAM_B;
else
mode = LUT_BYPASS;
} else {
// Reserved value
mode = LUT_BYPASS;
BREAK_TO_DEBUGGER();
return mode;
}
return mode;
}
static void dwb3_configure_ogam_lut(
struct dcn30_dwbc *dwbc30,
bool is_ram_a)
{
REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);
REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
}
static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
const struct pwl_result_data *rgb,
uint32_t num)
{
uint32_t i;
uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
if (is_rgb_equal(rgb, num)) {
for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
} else {
REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
}
}
static bool dwb3_program_ogam_lut(
struct dcn30_dwbc *dwbc30,
const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
if (params == NULL) {
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 0);
return false;
}
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
current_mode = dwb3_get_ogam_current(dwbc30);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
else
next_mode = LUT_RAM_A;
dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
dwb3_program_ogam_luta_settings(dwbc30, params);
else
dwb3_program_ogam_lutb_settings(dwbc30, params);
dwb3_program_ogam_pwl(
dwbc30, params->rgb_resulted, params->hw_points_num);
REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
return true;
}
bool dwb3_ogam_set_input_transfer_func(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
bool result = false;
struct pwl_params *dwb_ogam_lut = NULL;
if (in_transfer_func_dwb_ogam == NULL)
return result;
dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
if (dwb_ogam_lut) {
cm_helper_translate_curve_to_hw_format(dwbc->ctx,
in_transfer_func_dwb_ogam,
dwb_ogam_lut, false);
result = dwb3_program_ogam_lut(
dwbc30,
dwb_ogam_lut);
kfree(dwb_ogam_lut);
dwb_ogam_lut = NULL;
}
return result;
}
static void dwb3_program_gamut_remap(
struct dwbc *dwbc,
const uint16_t *regval,
enum cm_gamut_coef_format coef_format,
enum cm_gamut_remap_select select)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
struct color_matrices_reg gam_regs;
if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
REG_SET(DWB_GAMUT_REMAP_MODE, 0,
DWB_GAMUT_REMAP_MODE, 0);
return;
}
REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
switch (select) {
case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPA_C33_C34);
cm_helper_program_color_matrices(
dwbc30->base.ctx,
regval,
&gam_regs);
break;
case CM_GAMUT_REMAP_MODE_RAMB_COEFF:
gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPB_C11_C12);
gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPB_C33_C34);
cm_helper_program_color_matrices(
dwbc30->base.ctx,
regval,
&gam_regs);
break;
case CM_GAMUT_REMAP_MODE_RESERVED:
/* should never happen, bug */
BREAK_TO_DEBUGGER();
return;
default:
break;
}
REG_SET(DWB_GAMUT_REMAP_MODE, 0,
DWB_GAMUT_REMAP_MODE, select);
}
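/*
 * Apply the gamut remap described by the DWB params. For a SW adjustment the
 * temperature matrix is converted to register format and written to the
 * coefficient set that is not currently selected (A/B ping-pong); any other
 * adjustment type bypasses the gamut remap block.
 */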
void dwb3_set_gamut_remap(
struct dwbc *dwbc,
const struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
struct cm_grph_csc_adjustment adjust = params->csc_params;
int i = 0;
if (adjust.gamut_adjust_type != CM_GAMUT_ADJUST_TYPE_SW) {
/* Bypass if type is bypass or hw */
dwb3_program_gamut_remap(dwbc, NULL, adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_BYPASS);
} else {
struct fixed31_32 arr_matrix[12];
uint16_t arr_reg_val[12];
unsigned int current_mode;
for (i = 0; i < 12; i++)
arr_matrix[i] = adjust.temperature_matrix[i];
convert_float_matrix(arr_reg_val, arr_matrix, 12);
REG_GET(DWB_GAMUT_REMAP_MODE, DWB_GAMUT_REMAP_MODE_CURRENT, &current_mode);
if (current_mode == CM_GAMUT_REMAP_MODE_RAMA_COEFF) {
dwb3_program_gamut_remap(dwbc, arr_reg_val,
adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMB_COEFF);
} else {
dwb3_program_gamut_remap(dwbc, arr_reg_val,
adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMA_COEFF);
}
}
}
void dwb3_program_hdr_mult(
struct dwbc *dwbc,
const struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
REG_UPDATE(DWB_HDR_MULT_COEF, DWB_HDR_MULT_COEF, params->hdr_mult);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn30_mpc.h"
#include "dcn30_cm_common.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
#define CTX \
mpc30->base.ctx
#undef FN
#define FN(reg_name, field_name) \
mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
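/*
 * A DWB mux status of 0xf means no MPCC is connected to this DWB
 * (see mpc3_disable_dwb_mux below), i.e. the DWB path is idle.
 */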
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
unsigned int status;
REG_GET(DWB_MUX[dwb_id], MPC_DWB0_MUX_STATUS, &status);
if (status == 0xf)
return true;
else
return false;
}
void mpc3_set_dwb_mux(
struct mpc *mpc,
int dwb_id,
int mpcc_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_SET(DWB_MUX[dwb_id], 0,
MPC_DWB0_MUX, mpcc_id);
}
void mpc3_disable_dwb_mux(
struct mpc *mpc,
int dwb_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_SET(DWB_MUX[dwb_id], 0,
MPC_DWB0_MUX, 0xf);
}
void mpc3_set_out_rate_control(
struct mpc *mpc,
int opp_id,
bool enable,
bool rate_2x_mode,
struct mpc_dwb_flow_control *flow_control)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_UPDATE_2(MUX[opp_id],
MPC_OUT_RATE_CONTROL_DISABLE, !enable,
MPC_OUT_RATE_CONTROL, rate_2x_mode);
if (flow_control)
REG_UPDATE_2(MUX[opp_id],
MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
}
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/* Unlike DCN1 and DCN2, where a single status register field holds this info,
 * in DCN3/3AG we need to read two separate fields to retrieve the same info.
 */
enum dc_lut_mode mode;
uint32_t state_mode;
uint32_t state_ram_lut_in_use;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id], MPCC_OGAM_MODE_CURRENT, &state_mode,
MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use);
switch (state_mode) {
case 0:
mode = LUT_BYPASS;
break;
case 2:
switch (state_ram_lut_in_use) {
case 0:
mode = LUT_RAM_A;
break;
case 1:
mode = LUT_RAM_B;
break;
default:
mode = LUT_BYPASS;
break;
}
break;
default:
mode = LUT_BYPASS;
break;
}
return mode;
}
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
/*
* Powering on: force memory active so the LUT can be updated.
* Powering off: allow entering memory low power mode
*
* Memory low power mode is controlled during MPC OGAM LUT init.
*/
REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
/* Wait for memory to be powered on - we won't be able to write to it otherwise. */
if (power_on)
REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
}
static void mpc3_configure_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool is_ram_a)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_UPDATE_2(MPCC_OGAM_LUT_CONTROL[mpcc_id],
MPCC_OGAM_LUT_WRITE_COLOR_MASK, 7,
MPCC_OGAM_LUT_HOST_SEL, is_ram_a == true ? 0:1);
REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
}
static void mpc3_ogam_get_reg_field(
struct mpc *mpc,
struct dcn3_xfer_func_reg *reg)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
reg->shifts.field_region_start_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->masks.field_region_start_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->shifts.field_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_OFFSET_B;
reg->masks.field_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_OFFSET_B;
reg->shifts.exp_region0_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B;
reg->masks.field_region_end = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_region_linear_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->shifts.exp_region_start = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
}
static void mpc3_program_luta(struct mpc *mpc, int mpcc_id,
const struct pwl_params *params)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
struct dcn3_xfer_func_reg gam_regs;
mpc3_ogam_get_reg_field(mpc, &gam_regs);
gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]);
gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]);
gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]);
gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[mpcc_id]);
gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[mpcc_id]);
gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[mpcc_id]);
gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]);
gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]);
gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]);
gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]);
gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]);
gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]);
gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]);
gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]);
//New registers in DCN3AG/DCN OGAM block
gam_regs.offset_b = REG(MPCC_OGAM_RAMA_OFFSET_B[mpcc_id]);
gam_regs.offset_g = REG(MPCC_OGAM_RAMA_OFFSET_G[mpcc_id]);
gam_regs.offset_r = REG(MPCC_OGAM_RAMA_OFFSET_R[mpcc_id]);
gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_B[mpcc_id]);
gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_G[mpcc_id]);
gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_R[mpcc_id]);
cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs);
}
static void mpc3_program_lutb(struct mpc *mpc, int mpcc_id,
const struct pwl_params *params)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
struct dcn3_xfer_func_reg gam_regs;
mpc3_ogam_get_reg_field(mpc, &gam_regs);
gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]);
gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]);
gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]);
gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[mpcc_id]);
gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[mpcc_id]);
gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[mpcc_id]);
gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]);
gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]);
gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]);
gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]);
gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]);
gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]);
gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]);
gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]);
//New registers in DCN3AG/DCN OGAM block
gam_regs.offset_b = REG(MPCC_OGAM_RAMB_OFFSET_B[mpcc_id]);
gam_regs.offset_g = REG(MPCC_OGAM_RAMB_OFFSET_G[mpcc_id]);
gam_regs.offset_r = REG(MPCC_OGAM_RAMB_OFFSET_R[mpcc_id]);
gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_B[mpcc_id]);
gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_G[mpcc_id]);
gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_R[mpcc_id]);
cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs);
}
static void mpc3_program_ogam_pwl(
struct mpc *mpc, int mpcc_id,
const struct pwl_result_data *rgb,
uint32_t num)
{
uint32_t i;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
/* The entries of DCN3AG gamma LUTs take 18-bit base values, as opposed to
 * the 38 base+delta values per entry in earlier DCN architectures.
 * The last base value for our LUT is computed by adding the last base value
 * in our data to the last delta.
 */
if (is_rgb_equal(rgb, num)) {
for (i = 0 ; i < num; i++)
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_red);
} else {
REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
MPCC_OGAM_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_red);
REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
MPCC_OGAM_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_green);
REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
MPCC_OGAM_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_blue);
}
}
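/*
 * Program the MPCC output gamma LUT. The LUT memory is powered up, the RAM
 * that is not currently in use (A/B ping-pong) is loaded with the PWL curve
 * and then selected. If MPC memory low power is enabled in the debug
 * options, the LUT memory is allowed to power down again afterwards.
 */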
void mpc3_set_output_gamma(
struct mpc *mpc,
int mpcc_id,
const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
if (mpc->ctx->dc->debug.cm_in_bypass) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
if (params == NULL) { //disable OGAM
REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
//enable OGAM
REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 2);
current_mode = mpc3_get_ogam_current(mpc, mpcc_id);
if (current_mode == LUT_BYPASS)
next_mode = LUT_RAM_A;
else if (current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
else
next_mode = LUT_RAM_A;
mpc3_power_on_ogam_lut(mpc, mpcc_id, true);
mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc3_program_luta(mpc, mpcc_id, params);
else
mpc3_program_lutb(mpc, mpcc_id, params);
mpc3_program_ogam_pwl(
mpc, mpcc_id, params->rgb_resulted, params->hw_points_num);
/* we need to program 2 fields here as opposed to 1 */
REG_UPDATE(MPCC_OGAM_CONTROL[mpcc_id],
MPCC_OGAM_SELECT, next_mode == LUT_RAM_A ? 0:1);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
mpc3_power_on_ogam_lut(mpc, mpcc_id, false);
}
void mpc3_set_denorm(
struct mpc *mpc,
int opp_id,
enum dc_color_depth output_depth)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
/* De-normalize Fixed U1.13 color data to different target bit depths. 0 is bypass*/
int denorm_mode = 0;
switch (output_depth) {
case COLOR_DEPTH_666:
denorm_mode = 1;
break;
case COLOR_DEPTH_888:
denorm_mode = 2;
break;
case COLOR_DEPTH_999:
denorm_mode = 3;
break;
case COLOR_DEPTH_101010:
denorm_mode = 4;
break;
case COLOR_DEPTH_111111:
denorm_mode = 5;
break;
case COLOR_DEPTH_121212:
denorm_mode = 6;
break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
/* not a valid use case */
break;
}
REG_UPDATE(DENORM_CONTROL[opp_id],
MPC_OUT_DENORM_MODE, denorm_mode);
}
void mpc3_set_denorm_clamp(
struct mpc *mpc,
int opp_id,
struct mpc_denorm_clamp denorm_clamp)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
/*program min and max clamp values for the pixel components*/
REG_UPDATE_2(DENORM_CONTROL[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr,
MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr);
REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y,
MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y);
REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb,
MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb);
}
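/* Return which shaper LUT RAM (A or B) is currently active, or LUT_BYPASS. */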
static enum dc_lut_mode mpc3_get_shaper_current(struct mpc *mpc, uint32_t rmu_idx)
{
enum dc_lut_mode mode;
uint32_t state_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_GET(SHAPER_CONTROL[rmu_idx], MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode);
switch (state_mode) {
case 0:
mode = LUT_BYPASS;
break;
case 1:
mode = LUT_RAM_A;
break;
case 2:
mode = LUT_RAM_B;
break;
default:
mode = LUT_BYPASS;
break;
}
return mode;
}
static void mpc3_configure_shaper_lut(
struct mpc *mpc,
bool is_ram_a,
uint32_t rmu_idx)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx],
MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, 7);
REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx],
MPC_RMU_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
REG_SET(SHAPER_LUT_INDEX[rmu_idx], 0, MPC_RMU_SHAPER_LUT_INDEX, 0);
}
static void mpc3_program_shaper_luta_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t rmu_idx)
{
const struct gamma_curve *curve;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_SET_2(SHAPER_RAMA_START_CNTL_B[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMA_START_CNTL_G[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMA_START_CNTL_R[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMA_END_CNTL_B[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
REG_SET_2(SHAPER_RAMA_END_CNTL_G[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
REG_SET_2(SHAPER_RAMA_END_CNTL_R[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
REG_SET_4(SHAPER_RAMA_REGION_0_1[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_2_3[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_4_5[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_6_7[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_8_9[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_10_11[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_12_13[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_14_15[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_16_17[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_18_19[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_20_21[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_22_23[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_24_25[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_26_27[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_28_29[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_30_31[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMA_REGION_32_33[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
}
static void mpc3_program_shaper_lutb_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t rmu_idx)
{
const struct gamma_curve *curve;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_SET_2(SHAPER_RAMB_START_CNTL_B[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMB_START_CNTL_G[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMB_START_CNTL_R[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
REG_SET_2(SHAPER_RAMB_END_CNTL_B[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
REG_SET_2(SHAPER_RAMB_END_CNTL_G[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
REG_SET_2(SHAPER_RAMB_END_CNTL_R[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
REG_SET_4(SHAPER_RAMB_REGION_0_1[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_2_3[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_4_5[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_6_7[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_8_9[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_10_11[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_12_13[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_14_15[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_16_17[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_18_19[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_20_21[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_22_23[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_24_25[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_26_27[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_28_29[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_30_31[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
REG_SET_4(SHAPER_RAMB_REGION_32_33[rmu_idx], 0,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
}
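/*
 * Write the shaper LUT entries. Each register write carries one colour
 * channel of one PWL point: a 10-bit delta in bits 23:14 and a 14-bit base
 * value in bits 13:0, written in red, green, blue order.
 */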
static void mpc3_program_shaper_lut(
struct mpc *mpc,
const struct pwl_result_data *rgb,
uint32_t num,
uint32_t rmu_idx)
{
uint32_t i, red, green, blue;
uint32_t red_delta, green_delta, blue_delta;
uint32_t red_value, green_value, blue_value;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
for (i = 0 ; i < num; i++) {
red = rgb[i].red_reg;
green = rgb[i].green_reg;
blue = rgb[i].blue_reg;
red_delta = rgb[i].delta_red_reg;
green_delta = rgb[i].delta_green_reg;
blue_delta = rgb[i].delta_blue_reg;
red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);
REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, red_value);
REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, green_value);
REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, blue_value);
}
}
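/*
 * Force the shaper and 3DLUT memories of the given RMU out of (or allow them
 * back into) low power state. When powering on with MPC memory low power
 * enabled, wait until both memories report the powered-up state.
 */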
static void mpc3_power_on_shaper_3dlut(
struct mpc *mpc,
uint32_t rmu_idx,
bool power_on)
{
uint32_t power_status_shaper = 2;
uint32_t power_status_3dlut = 2;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int max_retries = 10;
if (rmu_idx == 0) {
REG_SET(MPC_RMU_MEM_PWR_CTRL, 0,
MPC_RMU0_MEM_PWR_DIS, power_on == true ? 1:0);
/* wait for memory to fully power up */
if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, 0, 1, max_retries);
REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, 0, 1, max_retries);
}
/*read status is not mandatory, it is just for debugging*/
REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, &power_status_shaper);
REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
} else if (rmu_idx == 1) {
REG_SET(MPC_RMU_MEM_PWR_CTRL, 0,
MPC_RMU1_MEM_PWR_DIS, power_on == true ? 1:0);
if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, 0, 1, max_retries);
REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, 0, 1, max_retries);
}
REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, &power_status_shaper);
REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
}
/*TODO Add rmu_idx == 2 for SIENNA_CICHLID */
if (power_status_shaper != 0 && power_on == true)
BREAK_TO_DEBUGGER();
if (power_status_3dlut != 0 && power_on == true)
BREAK_TO_DEBUGGER();
}
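/*
 * Program the RMU shaper LUT. As with the OGAM path, RAM A and RAM B are
 * used in a ping-pong fashion: the RAM not currently in use is programmed
 * and then selected via MPC_RMU_SHAPER_LUT_MODE (1 = RAM A, 2 = RAM B).
 * NULL params put the shaper into bypass.
 */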
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t rmu_idx)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
if (params == NULL) {
REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, 0);
return false;
}
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true);
current_mode = mpc3_get_shaper_current(mpc, rmu_idx);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
else
next_mode = LUT_RAM_A;
mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, rmu_idx);
if (next_mode == LUT_RAM_A)
mpc3_program_shaper_luta_settings(mpc, params, rmu_idx);
else
mpc3_program_shaper_lutb_settings(mpc, params, rmu_idx);
mpc3_program_shaper_lut(
mpc, params->rgb_resulted, params->hw_points_num, rmu_idx);
REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2);
mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false);
return true;
}
static void mpc3_set_3dlut_mode(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
bool is_lut_size17x17x17,
uint32_t rmu_idx)
{
uint32_t lut_mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
if (mode == LUT_BYPASS)
lut_mode = 0;
else if (mode == LUT_RAM_A)
lut_mode = 1;
else
lut_mode = 2;
REG_UPDATE_2(RMU_3DLUT_MODE[rmu_idx],
MPC_RMU_3DLUT_MODE, lut_mode,
MPC_RMU_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
}
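/*
 * Read back the current 3DLUT configuration of an RMU: which RAM is active
 * (or bypass), whether entries use 12-bit colour channels (30-bit packing
 * disabled) and whether the LUT size is 17x17x17 or 9x9x9.
 */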
static enum dc_lut_mode get3dlut_config(
struct mpc *mpc,
bool *is_17x17x17,
bool *is_12bits_color_channel,
int rmu_idx)
{
uint32_t i_mode, i_enable_10bits, lut_size;
enum dc_lut_mode mode;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_GET(RMU_3DLUT_MODE[rmu_idx],
MPC_RMU_3DLUT_MODE_CURRENT, &i_mode);
REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx],
MPC_RMU_3DLUT_30BIT_EN, &i_enable_10bits);
switch (i_mode) {
case 0:
mode = LUT_BYPASS;
break;
case 1:
mode = LUT_RAM_A;
break;
case 2:
mode = LUT_RAM_B;
break;
default:
mode = LUT_BYPASS;
break;
}
if (i_enable_10bits > 0)
*is_12bits_color_channel = false;
else
*is_12bits_color_channel = true;
REG_GET(RMU_3DLUT_MODE[rmu_idx], MPC_RMU_3DLUT_SIZE, &lut_size);
if (lut_size == 0)
*is_17x17x17 = true;
else
*is_17x17x17 = false;
return mode;
}
static void mpc3_select_3dlut_ram(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
uint32_t rmu_idx)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_UPDATE_2(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx],
MPC_RMU_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
MPC_RMU_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1);
}
static void mpc3_select_3dlut_ram_mask(
struct mpc *mpc,
uint32_t ram_selection_mask,
uint32_t rmu_idx)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
REG_UPDATE(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], MPC_RMU_3DLUT_WRITE_EN_MASK,
ram_selection_mask);
REG_SET(RMU_3DLUT_INDEX[rmu_idx], 0, MPC_RMU_3DLUT_INDEX, 0);
}
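/*
 * Load 12-bit 3DLUT entries: two LUT points are written per register access
 * (DATA0/DATA1), one colour component at a time, with each component shifted
 * left by 4 before being written.
 */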
static void mpc3_set3dlut_ram12(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
uint32_t rmu_idx)
{
uint32_t i, red, green, blue, red1, green1, blue1;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
for (i = 0 ; i < entries; i += 2) {
red = lut[i].red<<4;
green = lut[i].green<<4;
blue = lut[i].blue<<4;
red1 = lut[i+1].red<<4;
green1 = lut[i+1].green<<4;
blue1 = lut[i+1].blue<<4;
REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
MPC_RMU_3DLUT_DATA0, red,
MPC_RMU_3DLUT_DATA1, red1);
REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
MPC_RMU_3DLUT_DATA0, green,
MPC_RMU_3DLUT_DATA1, green1);
REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
MPC_RMU_3DLUT_DATA0, blue,
MPC_RMU_3DLUT_DATA1, blue1);
}
}
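/*
 * Load 10-bit 3DLUT entries: red, green and blue are packed into a single
 * 30-bit word (red in bits 29:20, green in bits 19:10, blue in bits 9:0),
 * one LUT point per register write.
 */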
static void mpc3_set3dlut_ram10(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
uint32_t rmu_idx)
{
uint32_t i, red, green, blue, value;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
for (i = 0; i < entries; i++) {
red = lut[i].red;
green = lut[i].green;
blue = lut[i].blue;
//should we shift red 22bit and green 12? ask Nvenko
value = (red<<20) | (green<<10) | blue;
REG_SET(RMU_3DLUT_DATA_30BIT[rmu_idx], 0, MPC_RMU_3DLUT_DATA_30BIT, value);
}
}
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
mpcc->mpcc_bot = NULL;
mpcc->blnd_cfg.overlap_only = false;
mpcc->blnd_cfg.global_alpha = 0xff;
mpcc->blnd_cfg.global_gain = 0xff;
mpcc->blnd_cfg.background_color_bpc = 4;
mpcc->blnd_cfg.bottom_gain_mode = 0;
mpcc->blnd_cfg.top_gain = 0x1f000;
mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
mpcc->sm_cfg.enable = false;
mpcc->shared_bottom = false;
}
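/*
 * Program the MPCC gamut remap coefficients into set A (GAMUT_REMAP_COEFF)
 * or set B (GAMUT_REMAP_COMA_COEFF) and select that set. A NULL regval or a
 * bypass select disables gamut remap for this MPCC.
 */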
static void program_gamut_remap(
struct dcn30_mpc *mpc30,
int mpcc_id,
const uint16_t *regval,
int select)
{
uint16_t selection = 0;
struct color_matrices_reg gam_regs;
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0,
MPCC_GAMUT_REMAP_MODE, GAMUT_REMAP_BYPASS);
return;
}
switch (select) {
case GAMUT_REMAP_COEFF:
selection = 1;
break;
/*this corresponds to GAMUT_REMAP coefficients set B
* we don't have common coefficient sets in dcn3ag/dcn3
*/
case GAMUT_REMAP_COMA_COEFF:
selection = 2;
break;
default:
break;
}
gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
if (select == GAMUT_REMAP_COEFF) {
gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
cm_helper_program_color_matrices(
mpc30->base.ctx,
regval,
&gam_regs);
} else if (select == GAMUT_REMAP_COMA_COEFF) {
gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
cm_helper_program_color_matrices(
mpc30->base.ctx,
regval,
&gam_regs);
}
//select coefficient set to use
REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0,
MPCC_GAMUT_REMAP_MODE, selection);
}
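/*
 * Apply a gamut adjustment to an MPCC. For a SW adjustment the temperature
 * matrix is converted to register format and written to the coefficient set
 * that is not currently in use; other adjustment types select bypass.
 */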
void mpc3_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
const struct mpc_grph_gamut_adjustment *adjust)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int i = 0;
int gamut_mode;
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
program_gamut_remap(mpc30, mpcc_id, NULL, GAMUT_REMAP_BYPASS);
else {
struct fixed31_32 arr_matrix[12];
uint16_t arr_reg_val[12];
for (i = 0; i < 12; i++)
arr_matrix[i] = adjust->temperature_matrix[i];
convert_float_matrix(
arr_reg_val, arr_matrix, 12);
//current coefficient set in use
REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
if (gamut_mode == 0)
gamut_mode = 1; //use coefficient set A
else if (gamut_mode == 1)
gamut_mode = 2;
else
gamut_mode = 1;
program_gamut_remap(mpc30, mpcc_id, arr_reg_val, gamut_mode);
}
}
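/*
 * Program the post-blend 3DLUT of an RMU. The LUT memory is powered up, the
 * RAM not currently in use is selected, and the four LUT banks (lut0..lut3)
 * are loaded one at a time via the write-enable mask before the new mode and
 * size are programmed. NULL params put the 3DLUT into bypass.
 */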
bool mpc3_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int rmu_idx)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
const struct dc_rgb *lut0;
const struct dc_rgb *lut1;
const struct dc_rgb *lut2;
const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
if (params == NULL) {
mpc3_set_3dlut_mode(mpc, LUT_BYPASS, false, false, rmu_idx);
return false;
}
mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true);
mode = get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, rmu_idx);
if (mode == LUT_BYPASS || mode == LUT_RAM_B)
mode = LUT_RAM_A;
else
mode = LUT_RAM_B;
is_17x17x17 = !params->use_tetrahedral_9;
is_12bits_color_channel = params->use_12bits;
if (is_17x17x17) {
lut0 = params->tetrahedral_17.lut0;
lut1 = params->tetrahedral_17.lut1;
lut2 = params->tetrahedral_17.lut2;
lut3 = params->tetrahedral_17.lut3;
lut_size0 = sizeof(params->tetrahedral_17.lut0)/
sizeof(params->tetrahedral_17.lut0[0]);
lut_size = sizeof(params->tetrahedral_17.lut1)/
sizeof(params->tetrahedral_17.lut1[0]);
} else {
lut0 = params->tetrahedral_9.lut0;
lut1 = params->tetrahedral_9.lut1;
lut2 = params->tetrahedral_9.lut2;
lut3 = params->tetrahedral_9.lut3;
lut_size0 = sizeof(params->tetrahedral_9.lut0)/
sizeof(params->tetrahedral_9.lut0[0]);
lut_size = sizeof(params->tetrahedral_9.lut1)/
sizeof(params->tetrahedral_9.lut1[0]);
}
mpc3_select_3dlut_ram(mpc, mode,
is_12bits_color_channel, rmu_idx);
mpc3_select_3dlut_ram_mask(mpc, 0x1, rmu_idx);
if (is_12bits_color_channel)
mpc3_set3dlut_ram12(mpc, lut0, lut_size0, rmu_idx);
else
mpc3_set3dlut_ram10(mpc, lut0, lut_size0, rmu_idx);
mpc3_select_3dlut_ram_mask(mpc, 0x2, rmu_idx);
if (is_12bits_color_channel)
mpc3_set3dlut_ram12(mpc, lut1, lut_size, rmu_idx);
else
mpc3_set3dlut_ram10(mpc, lut1, lut_size, rmu_idx);
mpc3_select_3dlut_ram_mask(mpc, 0x4, rmu_idx);
if (is_12bits_color_channel)
mpc3_set3dlut_ram12(mpc, lut2, lut_size, rmu_idx);
else
mpc3_set3dlut_ram10(mpc, lut2, lut_size, rmu_idx);
mpc3_select_3dlut_ram_mask(mpc, 0x8, rmu_idx);
if (is_12bits_color_channel)
mpc3_set3dlut_ram12(mpc, lut3, lut_size, rmu_idx);
else
mpc3_set3dlut_ram10(mpc, lut3, lut_size, rmu_idx);
mpc3_set_3dlut_mode(mpc, mode, is_12bits_color_channel,
is_17x17x17, rmu_idx);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false);
return true;
}
void mpc3_set_output_csc(
struct mpc *mpc,
int opp_id,
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
struct color_matrices_reg ocsc_regs;
REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0);
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
return;
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A;
ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A;
if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
} else {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
cm_helper_program_color_matrices(
mpc30->base.ctx,
regval,
&ocsc_regs);
}
void mpc3_set_ocsc_default(
struct mpc *mpc,
int opp_id,
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0);
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
return;
regval = find_color_matrix(color_space, &arr_size);
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A;
ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A;
if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
} else {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
cm_helper_program_color_matrices(
mpc30->base.ctx,
regval,
&ocsc_regs);
}
void mpc3_set_rmu_mux(
struct mpc *mpc,
int rmu_idx,
int value)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
if (rmu_idx == 0)
REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU0_MUX, value);
else if (rmu_idx == 1)
REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU1_MUX, value);
}
uint32_t mpc3_get_rmu_mux_status(
struct mpc *mpc,
int rmu_idx)
{
uint32_t status = 0xf;
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
if (rmu_idx == 0)
REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &status);
else if (rmu_idx == 1)
REG_GET(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, &status);
return status;
}
uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)
{
uint32_t rmu_status;
//determine if this mpcc is already multiplexed to an RMU unit
rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx);
if (rmu_status == mpcc_id)
//return rmu_idx of pre_acquired rmu unit
return rmu_idx;
if (rmu_status == 0xf) {//rmu unit is disabled
mpc3_set_rmu_mux(mpc, rmu_idx, mpcc_id);
return rmu_idx;
}
//no vacant RMU unit or invalid parameters passed to acquire_post_bldn_3dlut
return -1;
}
static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int rmu_idx;
uint32_t rmu_status;
int released_rmu = -1;
for (rmu_idx = 0; rmu_idx < mpc30->num_rmu; rmu_idx++) {
rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx);
if (rmu_status == mpcc_id) {
mpc3_set_rmu_mux(mpc, rmu_idx, 0xf);
released_rmu = rmu_idx;
break;
}
}
return released_rmu;
}
static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) {
REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3);
REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_LOW_PWR_MODE, 3);
}
if (mpc30->mpc_mask->MPCC_OGAM_MEM_LOW_PWR_MODE) {
for (mpcc_id = 0; mpcc_id < mpc30->num_mpcc; mpcc_id++)
REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_LOW_PWR_MODE, 3);
}
}
}
static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.set_denorm = mpc3_set_denorm,
.set_denorm_clamp = mpc3_set_denorm_clamp,
.set_output_csc = mpc3_set_output_csc,
.set_ocsc_default = mpc3_set_ocsc_default,
.set_output_gamma = mpc3_set_output_gamma,
.insert_plane_to_secondary = NULL,
.remove_mpcc_from_secondary = NULL,
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
.set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
.program_3dlut = mpc3_program_3dlut,
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
struct dc_context *ctx,
const struct dcn30_mpc_registers *mpc_regs,
const struct dcn30_mpc_shift *mpc_shift,
const struct dcn30_mpc_mask *mpc_mask,
int num_mpcc,
int num_rmu)
{
int i;
mpc30->base.ctx = ctx;
mpc30->base.funcs = &dcn30_mpc_funcs;
mpc30->mpc_regs = mpc_regs;
mpc30->mpc_shift = mpc_shift;
mpc30->mpc_mask = mpc_mask;
mpc30->mpcc_in_use_mask = 0;
mpc30->num_mpcc = num_mpcc;
mpc30->num_rmu = num_rmu;
for (i = 0; i < MAX_MPCC; i++)
mpc3_init_mpcc(&mpc30->base.mpcc_array[i], i);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
#include "dcn30_dwb.h"
#define REG(reg)\
dwbc30->dwbc_regs->reg
#define CTX \
dwbc30->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
#define DC_LOGGER \
dwbc30->base.ctx->logger
static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
{
if (caps) {
caps->adapter_id = 0; /* we only support 1 adapter currently */
caps->hw_version = DCN_VERSION_3_0;
caps->num_pipes = 2;
memset(&caps->reserved, 0, sizeof(caps->reserved));
memset(&caps->reserved2, 0, sizeof(caps->reserved2));
caps->sw_version = dwb_ver_2_0;
caps->caps.support_dwb = true;
caps->caps.support_ogam = true;
caps->caps.support_wbscl = true;
caps->caps.support_ocsc = false;
caps->caps.support_stereo = true;
return true;
} else {
return false;
}
}
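/*
 * Configure the DWB frame capture (FC) block: source size, optional crop
 * window, frame capture rate and stereo parameters.
 */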
void dwb3_config_fc(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
/* Set DWB source size */
REG_UPDATE_2(FC_SOURCE_SIZE, FC_SOURCE_WIDTH, params->cnv_params.src_width,
FC_SOURCE_HEIGHT, params->cnv_params.src_height);
/* If the capture window is not equal to the source size, enable cropping. */
if (params->cnv_params.crop_en) {
REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 1);
REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_X, params->cnv_params.crop_x);
REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_Y, params->cnv_params.crop_y);
REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_WIDTH, params->cnv_params.crop_width);
REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_HEIGHT, params->cnv_params.crop_height);
} else {
REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 0);
}
/* Set CAPTURE_RATE */
REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_RATE, params->capture_rate);
dwb3_set_stereo(dwbc, ¶ms->stereo_params);
}
bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
DC_LOG_DWB("%s dwb3_enabled at inst = %d", __func__, dwbc->inst);
/* Set WB_ENABLE (not double buffered; capture not enabled) */
REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 1);
/* Set FC parameters */
dwb3_config_fc(dwbc, params);
/* Program color processing unit */
dwb3_program_hdr_mult(dwbc, params);
dwb3_set_gamut_remap(dwbc, params);
dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
/* Program output denorm */
dwb3_set_denorm(dwbc, params);
/* Enable DWB capture enable (double buffered) */
REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE);
/* First pixel count */
REG_UPDATE(FC_FLOW_CTRL, FC_FIRST_PIXEL_DELAY_COUNT, 96);
return true;
}
bool dwb3_disable(struct dwbc *dwbc)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
/* disable FC */
REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE);
/* disable WB */
REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 0);
DC_LOG_DWB("%s dwb3_disabled at inst = %d", __func__, dwbc->inst);
return true;
}
bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
unsigned int pre_locked;
/*
* Check if the caller has already locked DWB registers.
* If so: assume the caller will unlock, so don't touch the lock.
* If not: lock them for this update, then unlock after the
* update is complete.
*/
REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
DC_LOG_DWB("%s dwb update, inst = %d", __func__, dwbc->inst);
if (pre_locked == 0) {
/* Lock DWB registers */
REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
}
/* Set FC parameters */
dwb3_config_fc(dwbc, params);
/* Program color processing unit */
dwb3_program_hdr_mult(dwbc, params);
dwb3_set_gamut_remap(dwbc, params);
dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
/* Program output denorm */
dwb3_set_denorm(dwbc, params);
if (pre_locked == 0) {
/* Unlock DWB registers */
REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
}
return true;
}
bool dwb3_is_enabled(struct dwbc *dwbc)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
unsigned int dwb_enabled = 0;
unsigned int fc_frame_capture_en = 0;
REG_GET(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, &dwb_enabled);
REG_GET(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, &fc_frame_capture_en);
return ((dwb_enabled != 0) && (fc_frame_capture_en != 0));
}
void dwb3_set_stereo(struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
if (stereo_params->stereo_enabled) {
REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, stereo_params->stereo_eye_select);
REG_UPDATE(FC_MODE_CTRL, FC_STEREO_EYE_POLARITY, stereo_params->stereo_polarity);
DC_LOG_DWB("%s dwb stereo enabled", __func__);
} else {
REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, 0);
DC_LOG_DWB("%s dwb stereo disabled", __func__);
}
}
void dwb3_set_new_content(struct dwbc *dwbc,
bool is_new_content)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
REG_UPDATE(FC_MODE_CTRL, FC_NEW_CONTENT, is_new_content);
}
void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
/* Set output format*/
REG_UPDATE(DWB_OUT_CTRL, OUT_FORMAT, params->cnv_params.fc_out_format);
/* Set output denorm */
if (params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_ARGB ||
params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_RGBA) {
REG_UPDATE(DWB_OUT_CTRL, OUT_DENORM, params->cnv_params.out_denorm_mode);
REG_UPDATE(DWB_OUT_CTRL, OUT_MAX, params->cnv_params.out_max_pix_val);
REG_UPDATE(DWB_OUT_CTRL, OUT_MIN, params->cnv_params.out_min_pix_val);
}
}
static const struct dwbc_funcs dcn30_dwbc_funcs = {
.get_caps = dwb3_get_caps,
.enable = dwb3_enable,
.disable = dwb3_disable,
.update = dwb3_update,
.is_enabled = dwb3_is_enabled,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
.dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
.dwb_set_scaler = NULL,
};
void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
struct dc_context *ctx,
const struct dcn30_dwbc_registers *dwbc_regs,
const struct dcn30_dwbc_shift *dwbc_shift,
const struct dcn30_dwbc_mask *dwbc_mask,
int inst)
{
dwbc30->base.ctx = ctx;
dwbc30->base.inst = inst;
dwbc30->base.funcs = &dcn30_dwbc_funcs;
dwbc30->dwbc_regs = dwbc_regs;
dwbc30->dwbc_shift = dwbc_shift;
dwbc30->dwbc_mask = dwbc_mask;
}
void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
/*
* Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no
* idle cycles in HW pipeline (in number of clock cycles times 4)
*/
REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc_bios_types.h"
#include "dcn30_vpg.h"
#include "reg_helper.h"
#define DC_LOGGER \
vpg3->base.ctx->logger
#define REG(reg)\
(vpg3->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
vpg3->vpg_shift->field_name, vpg3->vpg_mask->field_name
#define CTX \
vpg3->base.ctx
void vpg3_update_generic_info_packet(
struct vpg *vpg,
uint32_t packet_index,
const struct dc_info_packet *info_packet,
bool immediate_update)
{
struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg);
uint32_t i;
	/* TODOFPGA: figure out a proper max_retries value for polling the lock;
	 * use 50 for now.
	 */
uint32_t max_retries = 50;
if (packet_index > 14)
ASSERT(0);
/* poll dig_update_lock is not locked -> asic internal signal
* assume otg master lock will unlock it
*/
/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
* 0, 10, max_retries);
*/
/* TODO: Check if this is required */
/* check if HW reading GSP memory */
REG_WAIT(VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED,
0, 10, max_retries);
	/* Either HW is no longer reading GSP memory, or it kept reading for
	 * too long -> something is wrong. Clear the GSP memory access
	 * conflict and notify? SW is now writing to GSP memory.
	 */
REG_UPDATE(VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, 1);
/* choose which generic packet to use */
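	/* Each generic packet appears to occupy 9 dwords of GSP RAM
	 * (1 header dword + 8 payload dwords, matching the writes below),
	 * hence the packet_index * 9 addressing.
	 */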
REG_UPDATE(VPG_GENERIC_PACKET_ACCESS_CTRL,
VPG_GENERIC_DATA_INDEX, packet_index*9);
/* write generic packet header
* (4th byte is for GENERIC0 only)
*/
REG_SET_4(VPG_GENERIC_PACKET_DATA, 0,
VPG_GENERIC_DATA_BYTE0, info_packet->hb0,
VPG_GENERIC_DATA_BYTE1, info_packet->hb1,
VPG_GENERIC_DATA_BYTE2, info_packet->hb2,
VPG_GENERIC_DATA_BYTE3, info_packet->hb3);
/* write generic packet contents
* (we never use last 4 bytes)
* there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
*/
{
const uint32_t *content =
(const uint32_t *) &info_packet->sb[0];
for (i = 0; i < 8; i++) {
REG_WRITE(VPG_GENERIC_PACKET_DATA, *content++);
}
}
	/* Atomically latch the double-buffered GENERIC registers: either
	 * immediately (IMMEDIATE_UPDATE) or at the next frame update point
	 * (FRAME_UPDATE), depending on immediate_update.
	 */
if (immediate_update) {
switch (packet_index) {
case 0:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC0_IMMEDIATE_UPDATE, 1);
break;
case 1:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC1_IMMEDIATE_UPDATE, 1);
break;
case 2:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC2_IMMEDIATE_UPDATE, 1);
break;
case 3:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC3_IMMEDIATE_UPDATE, 1);
break;
case 4:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC4_IMMEDIATE_UPDATE, 1);
break;
case 5:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC5_IMMEDIATE_UPDATE, 1);
break;
case 6:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC6_IMMEDIATE_UPDATE, 1);
break;
case 7:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC7_IMMEDIATE_UPDATE, 1);
break;
case 8:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC8_IMMEDIATE_UPDATE, 1);
break;
case 9:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC9_IMMEDIATE_UPDATE, 1);
break;
case 10:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC10_IMMEDIATE_UPDATE, 1);
break;
case 11:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC11_IMMEDIATE_UPDATE, 1);
break;
case 12:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC12_IMMEDIATE_UPDATE, 1);
break;
case 13:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC13_IMMEDIATE_UPDATE, 1);
break;
case 14:
REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
VPG_GENERIC14_IMMEDIATE_UPDATE, 1);
break;
default:
break;
}
} else {
switch (packet_index) {
case 0:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC0_FRAME_UPDATE, 1);
break;
case 1:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC1_FRAME_UPDATE, 1);
break;
case 2:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC2_FRAME_UPDATE, 1);
break;
case 3:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC3_FRAME_UPDATE, 1);
break;
case 4:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC4_FRAME_UPDATE, 1);
break;
case 5:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC5_FRAME_UPDATE, 1);
break;
case 6:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC6_FRAME_UPDATE, 1);
break;
case 7:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC7_FRAME_UPDATE, 1);
break;
case 8:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC8_FRAME_UPDATE, 1);
break;
case 9:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC9_FRAME_UPDATE, 1);
break;
case 10:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC10_FRAME_UPDATE, 1);
break;
case 11:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC11_FRAME_UPDATE, 1);
break;
case 12:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC12_FRAME_UPDATE, 1);
break;
case 13:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC13_FRAME_UPDATE, 1);
break;
case 14:
REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
VPG_GENERIC14_FRAME_UPDATE, 1);
break;
default:
break;
}
}
}
static struct vpg_funcs dcn30_vpg_funcs = {
.update_generic_info_packet = vpg3_update_generic_info_packet,
};
void vpg3_construct(struct dcn30_vpg *vpg3,
struct dc_context *ctx,
uint32_t inst,
const struct dcn30_vpg_registers *vpg_regs,
const struct dcn30_vpg_shift *vpg_shift,
const struct dcn30_vpg_mask *vpg_mask)
{
vpg3->base.ctx = ctx;
vpg3->base.inst = inst;
vpg3->base.funcs = &dcn30_vpg_funcs;
vpg3->regs = vpg_regs;
vpg3->vpg_shift = vpg_shift;
vpg3->vpg_mask = vpg_mask;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn30_hubp.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "reg_helper.h"
#include "basics/conversion.h"
#include "dcn20/dcn20_hubp.h"
#include "dcn21/dcn21_hubp.h"
#define REG(reg)\
hubp2->hubp_regs->reg
#define CTX \
hubp2->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
	// High/low hold the upper bits (47:18) of the 48-bit system address
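	// e.g. (illustrative only) a system address of 0x0000'0008'0000'0000
	// programs 0x8'0000'0000 >> 18 = 0x20000 into the aperture register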
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
ENABLE_L1_TLB, 1,
SYSTEM_ACCESS_MODE, 0x3);
}
bool hubp3_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//program flip type
REG_UPDATE(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_TYPE, flip_immediate);
// Program VMID reg
if (flip_immediate == 0)
REG_UPDATE(VMID_SETTINGS_0,
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
// turn off stereo if not in stereo
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
}
	/* HW automatically latches the rest of the address registers on the
	 * write to DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is
	 * not used.
	 *
	 * Program the high address first and then the low address; order matters!
	 */
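	/* Per-address-type register mapping (as programmed below):
	 *  GRAPHICS          - primary surface only
	 *  VIDEO_PROGRESSIVE - primary = luma, primary _C = chroma
	 *  GRPH_STEREO       - primary = left eye, secondary = right eye
	 *  RGBEA             - primary = color, primary _C = alpha plane
	 */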
switch (address->type) {
case PLN_ADDR_TYPE_GRAPHICS:
/* DCN1.0 does not support const color
* TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
* base on address->grph.dcc_const_color
* x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
* x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
*/
if (address->grph.addr.quad_part == 0)
break;
REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
if (address->grph.meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph.meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph.meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph.addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph.addr.low_part);
break;
case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
if (address->video_progressive.luma_addr.quad_part == 0
|| address->video_progressive.chroma_addr.quad_part == 0)
break;
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->video_progressive.luma_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
address->video_progressive.chroma_meta_addr.low_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->video_progressive.luma_meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
address->video_progressive.chroma_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->video_progressive.luma_addr.low_part);
break;
case PLN_ADDR_TYPE_GRPH_STEREO:
if (address->grph_stereo.left_addr.quad_part == 0)
break;
if (address->grph_stereo.right_addr.quad_part == 0)
break;
REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_SURFACE_TMZ, address->tmz_surface,
SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->grph_stereo.right_meta_addr.quad_part != 0) {
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, 0,
SECONDARY_META_SURFACE_ADDRESS_HIGH_C,
address->grph_stereo.right_alpha_meta_addr.high_part);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, 0,
SECONDARY_META_SURFACE_ADDRESS_C,
address->grph_stereo.right_alpha_meta_addr.low_part);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_meta_addr.high_part);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
SECONDARY_META_SURFACE_ADDRESS,
address->grph_stereo.right_meta_addr.low_part);
}
if (address->grph_stereo.left_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
address->grph_stereo.left_alpha_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
address->grph_stereo.left_alpha_meta_addr.low_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph_stereo.left_meta_addr.low_part);
}
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, 0,
SECONDARY_SURFACE_ADDRESS_HIGH_C,
address->grph_stereo.right_alpha_addr.high_part);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_C, 0,
SECONDARY_SURFACE_ADDRESS_C,
address->grph_stereo.right_alpha_addr.low_part);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_addr.high_part);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
SECONDARY_SURFACE_ADDRESS,
address->grph_stereo.right_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
address->grph_stereo.left_alpha_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
address->grph_stereo.left_alpha_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph_stereo.left_addr.low_part);
break;
case PLN_ADDR_TYPE_RGBEA:
if (address->rgbea.addr.quad_part == 0
|| address->rgbea.alpha_addr.quad_part == 0)
break;
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->rgbea.meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
address->rgbea.alpha_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
address->rgbea.alpha_meta_addr.low_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->rgbea.meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->rgbea.meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
address->rgbea.alpha_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
address->rgbea.alpha_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->rgbea.addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->rgbea.addr.low_part);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
hubp->request_address = *address;
return true;
}
static void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
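	/* The NUM_PIPES, MAX_COMPRESSED_FRAGS and NUM_PKRS fields take
	 * log2-encoded values, hence the log_2() conversion of the raw gfx9
	 * counts (encoding inferred from the conversion helpers used here).
	 */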
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags),
NUM_PKRS, log_2(info->gfx9.num_pkrs));
REG_UPDATE_3(DCSURF_TILING_CONFIG,
SW_MODE, info->gfx9.swizzle,
META_LINEAR, info->gfx9.meta_linear,
PIPE_ALIGNED, info->gfx9.pipe_aligned);
}
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size)
{
uint32_t dcc_en = enable ? 1 : 0;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc_en,
PRIMARY_SURFACE_DCC_IND_BLK, blk_size,
SECONDARY_SURFACE_DCC_EN, dcc_en,
SECONDARY_SURFACE_DCC_IND_BLK, blk_size);
}
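/* Sienna Cichlid variant: in addition to the common DCC enable and
 * independent block size fields, it programs separate independent block
 * sizes for the chroma (_C) planes.
 */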
void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
struct dc_plane_dcc_param *dcc)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc->enable,
PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
PRIMARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c,
SECONDARY_SURFACE_DCC_EN, dcc->enable,
SECONDARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
SECONDARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c);
}
void hubp3_dmdata_set_attributes(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
	/* always HW mode */
REG_UPDATE(DMDATA_CNTL,
DMDATA_MODE, 1);
/* for DMDATA flip, need to use SURFACE_UPDATE_LOCK */
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 1);
/* toggle DMDATA_UPDATED and set repeat and size */
REG_UPDATE(DMDATA_CNTL,
DMDATA_UPDATED, 0);
REG_UPDATE_3(DMDATA_CNTL,
DMDATA_UPDATED, 1,
DMDATA_REPEAT, attr->dmdata_repeat,
DMDATA_SIZE, attr->dmdata_size);
/* set DMDATA address */
REG_WRITE(DMDATA_ADDRESS_LOW, attr->address.low_part);
REG_UPDATE(DMDATA_ADDRESS_HIGH,
DMDATA_ADDRESS_HIGH, attr->address.high_part);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 0);
}
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp3_dcc_control_sienna_cichlid(hubp, dcc);
hubp3_program_tiling(hubp2, tiling_info, format);
hubp2_program_size(hubp, format, plane_size, dcc);
hubp2_program_rotation(hubp, rotation, horizontal_mirror);
hubp2_program_pixel_format(hubp, format);
}
static void hubp3_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
REG_UPDATE(DCN_DMDATA_VM_CNTL,
REFCYC_PER_VM_DMDATA, dlg_attr->refcyc_per_vm_dmdata);
}
void hubp3_read_state(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct dcn_hubp_state *s = &hubp2->state;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
hubp2_read_state_common(hubp);
REG_GET_7(DCHUBP_REQ_SIZE_CONFIG,
CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
if (REG(UCLK_PSTATE_FORCE))
s->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
if (REG(DCHUBP_CNTL))
s->hubp_cntl = REG_READ(DCHUBP_CNTL);
}
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
	/* OTG is locked when this function is called. Registers are double
	 * buffered, so disabling the requestors is not needed.
	 */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp21_program_requestor(hubp, rq_regs);
hubp3_program_deadline(hubp, dlg_attr, ttu_attr);
}
void hubp3_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
// This is a chicken bit to enable the ECO fix.
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
}
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp3_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp3_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
.hubp_clk_cntl = hubp2_clk_cntl,
.hubp_vtg_sel = hubp2_vtg_sel,
.dmdata_set_attributes = hubp3_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp3_init,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
};
bool hubp3_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
uint32_t inst,
const struct dcn_hubp2_registers *hubp_regs,
const struct dcn_hubp2_shift *hubp_shift,
const struct dcn_hubp2_mask *hubp_mask)
{
hubp2->base.funcs = &dcn30_hubp_funcs;
hubp2->base.ctx = ctx;
hubp2->hubp_regs = hubp_regs;
hubp2->hubp_shift = hubp_shift;
hubp2->hubp_mask = hubp_mask;
hubp2->base.inst = inst;
hubp2->base.opp_id = OPP_ID_INVALID;
hubp2->base.mpcc_id = 0xf;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"
#include "custom_float.h"
#define REG(reg) reg
#define CTX \
ctx //dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
reg->shifts.field_name, reg->masks.field_name
void cm_helper_program_gamcor_xfer_func(
struct dc_context *ctx,
const struct pwl_params *params,
const struct dcn3_xfer_func_reg *reg)
{
uint32_t reg_region_cur;
unsigned int i = 0;
REG_SET_2(reg->start_cntl_b, 0,
exp_region_start, params->corner_points[0].blue.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_g, 0,
exp_region_start, params->corner_points[0].green.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_r, 0,
exp_region_start, params->corner_points[0].red.custom_float_x,
exp_resion_start_segment, 0);
REG_SET(reg->start_slope_cntl_b, 0, //linear slope at start of curve
field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
REG_SET(reg->start_slope_cntl_g, 0,
field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
REG_SET(reg->start_slope_cntl_r, 0,
field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
REG_SET(reg->start_end_cntl1_b, 0,
field_region_end_base, params->corner_points[1].blue.custom_float_y);
REG_SET(reg->start_end_cntl1_g, 0,
field_region_end_base, params->corner_points[1].green.custom_float_y);
REG_SET(reg->start_end_cntl1_r, 0,
field_region_end_base, params->corner_points[1].red.custom_float_y);
REG_SET_2(reg->start_end_cntl2_b, 0,
field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
field_region_end, params->corner_points[1].blue.custom_float_x);
REG_SET_2(reg->start_end_cntl2_g, 0,
field_region_end_slope, params->corner_points[1].green.custom_float_slope,
field_region_end, params->corner_points[1].green.custom_float_x);
REG_SET_2(reg->start_end_cntl2_r, 0,
field_region_end_slope, params->corner_points[1].red.custom_float_slope,
field_region_end, params->corner_points[1].red.custom_float_x);
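	/* Each region register packs two PWL regions: a LUT offset and a
	 * (log2) segment count per region, taken pairwise from
	 * arr_curve_points[] as the loop below walks region_start..region_end.
	 */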
for (reg_region_cur = reg->region_start;
reg_region_cur <= reg->region_end;
reg_region_cur++) {
const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
REG_SET_4(reg_region_cur, 0,
exp_region0_lut_offset, curve0->offset,
exp_region0_num_segments, curve0->segments_num,
exp_region1_lut_offset, curve1->offset,
exp_region1_num_segments, curve1->segments_num);
i++;
}
}
/* driver uses 32 regions or fewer, but DCN HW has 34; the extra 2 are set to 0 */
#define MAX_REGIONS_NUMBER 34
#define MAX_LOW_POINT 25
#define NUMBER_REGIONS 32
#define NUMBER_SW_SEGMENTS 16
bool cm3_helper_translate_curve_to_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
struct pwl_result_data *rgb_minus_1;
struct fixed31_32 end_value;
int32_t region_start, region_end;
int32_t i;
uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
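	/* seg_distr[k] holds log2 of the number of HW points in region k
	 * (-1 marks an unused region), so hw_points accumulates
	 * sum(2^seg_distr[k]): e.g. 32 regions * 2^3 = 256 points for the
	 * PQ/HLG/Gamma2.2 case below.
	 */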
if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22 ||
output_tf->tf == TRANSFER_FUNCTION_HLG) {
/* 32 segments
* segments are from 2^-25 to 2^7
*/
for (i = 0; i < NUMBER_REGIONS ; i++)
seg_distr[i] = 3;
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
		/* 11 segments
		 * segments are from 2^-10 to 2^0
		 * There are fewer than 256 points, for optimization
		 */
seg_distr[0] = 3;
seg_distr[1] = 4;
seg_distr[2] = 4;
seg_distr[3] = 4;
seg_distr[4] = 4;
seg_distr[5] = 4;
seg_distr[6] = 4;
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
seg_distr[10] = 1;
region_start = -10;
region_end = 1;
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
seg_distr[i] = -1;
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1)
hw_points += (1 << seg_distr[k]);
}
j = 0;
for (k = 0; k < (region_end - region_start); k++) {
increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
start_index = (region_start + k + MAX_LOW_POINT) *
NUMBER_SW_SEGMENTS;
for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
j++;
}
}
/* last point */
start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
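	/* One extra point is appended after hw_points (a duplicate of the last
	 * point) so the delta/slope computation further below has a defined
	 * neighbour at the end of the curve.
	 */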
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
corner_points[0].blue.x = corner_points[0].red.x;
corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
corner_points[1].green.x = corner_points[1].red.x;
corner_points[1].blue.x = corner_points[1].red.x;
corner_points[0].red.y = rgb_resulted[0].red;
corner_points[0].green.y = rgb_resulted[0].green;
corner_points[0].blue.y = rgb_resulted[0].blue;
corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
corner_points[0].red.x);
corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
corner_points[0].green.x);
corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
corner_points[0].blue.x);
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
corner_points[1].red.slope = dc_fixpt_zero;
corner_points[1].green.slope = dc_fixpt_zero;
corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_HLG) {
		/* For PQ/HLG, we want a straight line from the last HW X point,
		 * with a slope such that we hit 1.0 at 10000 nits (PQ) or
		 * 1000 nits (HLG).
		 */
if (output_tf->tf == TRANSFER_FUNCTION_PQ)
end_value = dc_fixpt_from_int(125);
else
end_value = dc_fixpt_from_fraction(125, 10);
corner_points[1].red.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
dc_fixpt_sub(end_value, corner_points[1].red.x));
corner_points[1].green.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
dc_fixpt_sub(end_value, corner_points[1].green.x));
corner_points[1].blue.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
k = 0;
for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
k++;
}
if (seg_distr[k] != -1)
lut_params->arr_curve_points[k].segments_num = seg_distr[k];
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
rgb_minus_1 = rgb;
i = 1;
while (i != hw_points + 1) {
if (i >= hw_points - 1) {
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
}
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
if (fixpoint == true) {
rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
}
++rgb_plus_1;
rgb_minus_1 = rgb;
++rgb;
++i;
}
cm3_helper_convert_to_custom_float(rgb_resulted,
lut_params->corner_points,
hw_points, fixpoint);
return true;
}
#define NUM_DEGAMMA_REGIONS 12
bool cm3_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params)
{
struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
int32_t region_start, region_end;
int32_t i;
uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
region_start = -NUM_DEGAMMA_REGIONS;
region_end = 0;
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
seg_distr[i] = -1;
	/* 12 segments
	 * segments are from 2^-12 to 2^0
	 */
for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
seg_distr[i] = 4;
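	/* 12 regions * 2^4 points each -> hw_points works out to 192 here. */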
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1)
hw_points += (1 << seg_distr[k]);
}
j = 0;
for (k = 0; k < (region_end - region_start); k++) {
increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
start_index = (region_start + k + MAX_LOW_POINT) *
NUMBER_SW_SEGMENTS;
for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
j++;
}
}
/* last point */
start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
corner_points[0].blue.x = corner_points[0].red.x;
corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
corner_points[1].green.x = corner_points[1].red.x;
corner_points[1].blue.x = corner_points[1].red.x;
corner_points[0].red.y = rgb_resulted[0].red;
corner_points[0].green.y = rgb_resulted[0].green;
corner_points[0].blue.y = rgb_resulted[0].blue;
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
corner_points[1].red.slope = dc_fixpt_zero;
corner_points[1].green.slope = dc_fixpt_zero;
corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
corner_points[1].red.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
dc_fixpt_sub(end_value, corner_points[1].red.x));
corner_points[1].green.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
dc_fixpt_sub(end_value, corner_points[1].green.x));
corner_points[1].blue.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
k = 0;
for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
k++;
}
if (seg_distr[k] != -1)
lut_params->arr_curve_points[k].segments_num = seg_distr[k];
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
i = 1;
while (i != hw_points + 1) {
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = rgb->red;
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = rgb->green;
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
++rgb_plus_1;
++rgb;
++i;
}
cm3_helper_convert_to_custom_float(rgb_resulted,
lut_params->corner_points,
hw_points, false);
return true;
}
bool cm3_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint)
{
struct custom_float_format fmt;
struct pwl_result_data *rgb = rgb_resulted;
uint32_t i = 0;
fmt.exponenta_bits = 6;
fmt.mantissa_bits = 12;
fmt.sign = false;
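	/* PWL base/slope values use an unsigned custom float with a 6-bit
	 * exponent and 12-bit mantissa; the end-point X/slope values switch to
	 * a 10-bit mantissa variant further below (formats taken from the fmt
	 * settings in this function).
	 */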
/* corner_points[0] - beginning base, slope offset for R,G,B
* corner_points[1] - end base, slope offset for R,G,B
*/
if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
&corner_points[0].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
&corner_points[0].green.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
&corner_points[0].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
&corner_points[0].red.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
&corner_points[0].green.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
&corner_points[0].blue.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
&corner_points[0].red.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
&corner_points[0].green.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
&corner_points[0].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (fixpoint == true) {
corner_points[1].red.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].red.y);
corner_points[1].green.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].green.y);
corner_points[1].blue.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
} else {
if (!convert_to_custom_float_format(corner_points[1].red.y,
&fmt, &corner_points[1].red.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.y,
&fmt, &corner_points[1].green.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.y,
&fmt, &corner_points[1].blue.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
}
fmt.mantissa_bits = 10;
fmt.sign = false;
if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
&corner_points[1].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
&corner_points[1].green.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
&corner_points[1].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
&corner_points[1].red.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
&corner_points[1].green.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
&corner_points[1].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
return true;
fmt.mantissa_bits = 12;
while (i != hw_points_num) {
if (!convert_to_custom_float_format(rgb->red, &fmt,
&rgb->red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->green, &fmt,
&rgb->green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->blue, &fmt,
&rgb->blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
&rgb->delta_red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
&rgb->delta_green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
&rgb->delta_blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
++rgb;
++i;
}
return true;
}
bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num)
{
uint32_t i;
bool ret = true;
for (i = 0 ; i < num; i++) {
if (rgb[i].red_reg != rgb[i].green_reg ||
rgb[i].blue_reg != rgb[i].red_reg ||
rgb[i].blue_reg != rgb[i].green_reg) {
ret = false;
break;
}
}
return ret;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn30_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30_resource.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn30/dcn30_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn30/dcn30_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn30/dcn30_dio_link_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "link.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "amdgpu_socbb.h"
#include "dc_dmub_srv.h"
#define DC_LOGGER_INIT(logger)
enum dcn30_clk_src_array_id {
DCN30_CLK_SRC_PLL0,
DCN30_CLK_SRC_PLL1,
DCN30_CLK_SRC_PLL2,
DCN30_CLK_SRC_PLL3,
DCN30_CLK_SRC_PLL4,
DCN30_CLK_SRC_PLL5,
DCN30_CLK_SRC_TOTAL
};
/* begin *********************
 * macros to expand the register list macros defined in HW object header files
 */
/* DCN */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
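/* e.g. (illustrative expansion) SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) becomes
 * .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX) +
 *                          mmDP0_DP_DPHY_INTERNAL_CTRL
 */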
#define SRI2(reg_name, block, id)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## temp_name
#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## _ ## block ## id
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
mmMM ## reg_name
/* CLOCK */
#define CLK_BASE_INNER(seg) \
CLK_BASE__INST0_SEG ## seg
#define CLK_BASE(seg) \
CLK_BASE_INNER(seg)
#define CLK_SRI(reg_name, block, inst)\
.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## _ ## inst ## _ ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E),
clk_src_regs(5, F)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define abm_regs(id)\
[id] = {\
ABM_DCN30_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
abm_regs(1),
abm_regs(2),
abm_regs(3),
abm_regs(4),
abm_regs(5),
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN30(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
audio_regs(6)
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define vpg_regs(id)\
[id] = {\
VPG_DCN3_REG_LIST(id)\
}
static const struct dcn30_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
vpg_regs(3),
vpg_regs(4),
vpg_regs(5),
vpg_regs(6),
};
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
AFMT_DCN3_REG_LIST(id)\
}
static const struct dcn30_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
afmt_regs(3),
afmt_regs(4),
afmt_regs(5),
afmt_regs(6),
};
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
};
static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN3_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4),
stream_enc_regs(5)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4),
aux_regs(5)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E),
link_regs(5, F)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\
DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\
DPCS_DCN2_MASK_SH_LIST(_MASK)
};
static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
{ DCN_PANEL_CNTL_REG_LIST() }
};
static const struct dce_panel_cntl_shift panel_cntl_shift = {
DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};
static const struct dce_panel_cntl_mask panel_cntl_mask = {
DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
}
static const struct dcn3_dpp_registers dpp_regs[] = {
dpp_regs(0),
dpp_regs(1),
dpp_regs(2),
dpp_regs(3),
dpp_regs(4),
dpp_regs(5),
};
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
};
static const struct dcn3_dpp_mask tf_mask = {
DPP_REG_LIST_SH_MASK_DCN30(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN30(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3),
opp_regs(4),
opp_regs(5)
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define dwbc_regs_dcn3(id)\
[id] = {\
DWBC_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_dwbc_registers dwbc30_regs[] = {
dwbc_regs_dcn3(0),
};
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define mcif_wb_regs_dcn3(id)\
[id] = {\
MCIF_WB_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
mcif_wb_regs_dcn3(0)
};
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN20(0),
dsc_regsDCN20(1),
dsc_regsDCN20(2),
dsc_regsDCN20(3),
dsc_regsDCN20(4),
dsc_regsDCN20(5)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN3_0(0),
MPC_REG_LIST_DCN3_0(1),
MPC_REG_LIST_DCN3_0(2),
MPC_REG_LIST_DCN3_0(3),
MPC_REG_LIST_DCN3_0(4),
MPC_REG_LIST_DCN3_0(5),
MPC_OUT_MUX_REG_LIST_DCN3_0(0),
MPC_OUT_MUX_REG_LIST_DCN3_0(1),
MPC_OUT_MUX_REG_LIST_DCN3_0(2),
MPC_OUT_MUX_REG_LIST_DCN3_0(3),
MPC_OUT_MUX_REG_LIST_DCN3_0(4),
MPC_OUT_MUX_REG_LIST_DCN3_0(5),
MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
MPC_RMU_REG_LIST_DCN3AG(0),
MPC_RMU_REG_LIST_DCN3AG(1),
MPC_RMU_REG_LIST_DCN3AG(2),
MPC_DWB_MUX_REG_LIST_DCN3_0(0),
};
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define optc_regs(id)\
[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)}
static const struct dcn_optc_registers optc_regs[] = {
optc_regs(0),
optc_regs(1),
optc_regs(2),
optc_regs(3),
optc_regs(4),
optc_regs(5)
};
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN30(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3),
hubp_regs(4),
hubp_regs(5)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN30(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN30(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN30(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN30()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN3(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN3(_MASK)
};
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN30_REG_LIST()
};
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN30_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN30_MASK_SH_LIST(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
static const struct resource_caps res_cap_dcn3 = {
.num_timing_generator = 6,
.num_opp = 6,
.num_video_plane = 6,
.num_audio = 6,
.num_stream_encoder = 6,
.num_pll = 6,
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
.num_mpc_3dlut = 3,
.num_dsc = 6,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 16000
},
/* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
.argb8888 = 167,
.nv12 = 167,
.fp16 = 167
},
16,
16
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true, //No DMCU on DCN30
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 7680,/*upto 8K*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
.enable_legacy_fast_update = false,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
.disallow_replay = false,
},
};
static void dcn30_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn30_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
if (dpp3_construct(dpp, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
static struct output_pixel_processor *dcn30_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn30_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
i2c_inst_regs(6),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
static struct dce_i2c_hw *dcn30_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn30_mpc_create(
struct dc_context *ctx,
int num_mpcc,
int num_rmu)
{
struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
GFP_KERNEL);
if (!mpc30)
return NULL;
dcn30_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
num_mpcc,
num_rmu);
return &mpc30->base;
}
static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
GFP_KERNEL);
if (!hubbub3)
return NULL;
hubbub3_construct(hubbub3, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask);
for (i = 0; i < res_cap_dcn3.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub3->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
return &hubbub3->base;
}
static struct timing_generator *dcn30_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &optc_regs[instance];
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
dcn30_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn30_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn30_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc20->enc10.base;
}
static struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dce_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dce_panel_cntl_construct(panel_cntl,
init_data,
&panel_cntl_regs[init_data->inst],
&panel_cntl_shift,
&panel_cntl_mask);
return &panel_cntl->base;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn30_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct vpg *dcn30_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);
if (!vpg3)
return NULL;
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
return &vpg3->base;
}
static struct afmt *dcn30_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);
if (!afmt3)
return NULL;
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
return &afmt3->base;
}
static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
struct afmt *afmt;
int vpg_inst;
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
if (eng_id <= ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
return NULL;
enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
vpg = dcn30_vpg_create(ctx, vpg_inst);
afmt = dcn30_afmt_create(ctx, afmt_inst);
if (!enc1 || !vpg || !afmt) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
return NULL;
}
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn30_create_audio,
.create_stream_encoder = dcn30_stream_encoder_create,
.create_hwseq = dcn30_hwseq_create,
};
static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
if (pool->base.stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
pool->base.stream_enc[i]->vpg = NULL;
}
if (pool->base.stream_enc[i]->afmt != NULL) {
kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
pool->base.stream_enc[i]->afmt = NULL;
}
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn30_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
if (pool->base.mpc_lut[i] != NULL) {
dc_3dlut_func_release(pool->base.mpc_lut[i]);
pool->base.mpc_lut[i] = NULL;
}
if (pool->base.mpc_shaper[i] != NULL) {
dc_transfer_func_release(pool->base.mpc_shaper[i]);
pool->base.mpc_shaper[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.multiple_abms[i] != NULL)
dce_abm_destroy(&pool->base.multiple_abms[i]);
}
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.oem_device != NULL) {
struct dc *dc = pool->base.oem_device->ctx->dc;
dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
}
}
static struct hubp *dcn30_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
if (hubp3_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
BREAK_TO_DEBUGGER();
kfree(hubp2);
return NULL;
}
static bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
GFP_KERNEL);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
return false;
}
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
&dwbc30_mask,
i);
pool->dwbc[i] = &dwbc30->base;
}
return true;
}
static bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
GFP_KERNEL);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
return false;
}
dcn30_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
&mcif_wb30_mask,
i);
pool->mcif_wb[i] = &mcif_wb30->base;
}
return true;
}
static struct display_stream_compressor *dcn30_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{
return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream);
}
static void dcn30_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool);
dcn30_resource_destruct(dcn30_pool);
kfree(dcn30_pool);
*pool = NULL;
}
static struct clock_source *dcn30_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
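/*
* DML pipe population for DCN30: reuse the DCN20 population (under DC_FP
* protection) and then force every active pipe's line-buffer depth to
* dm_lb_16.
*/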
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth =
dm_lb_16;
}
return pipe_cnt;
}
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
DC_FP_START();
dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
DC_FP_END();
}
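/*
* Largest scaled-time budget for a writeback client: convert the MCIF
* free-entry count into a time capacity (time_per_byte is in u6.6 fixed
* point, hence the >> 6) and subtract the urgent watermark. The factor of
* 32 is assumed to be the number of bytes covered per free entry; it is
* not documented here.
*/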
unsigned int dcn30_calc_max_scaled_time(
unsigned int time_per_pixel,
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark)
{
unsigned int time_per_byte = 0;
unsigned int total_free_entry = 0xb40;
unsigned int buf_lh_capability;
unsigned int max_scaled_time;
if (mode == PACKED_444) /* packed mode 32 bpp */
time_per_byte = time_per_pixel/4;
else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
time_per_byte = time_per_pixel/8;
if (time_per_byte == 0)
time_per_byte = 1;
buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/
max_scaled_time = buf_lh_capability - urgent_watermark;
return max_scaled_time;
}
void dcn30_set_mcif_arb_params(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
enum mmhubbub_wbif_mode wbif_mode;
struct display_mode_lib *dml = &context->bw_ctx.dml;
struct mcif_arb_params *wb_arb_params;
int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
for (j = 0; j < MAX_DWB_PIPES; j++) {
struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j];
if (writeback_info->wb_enabled == false)
continue;
//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
if (writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
wbif_mode = PACKED_444_FP16;
else
wbif_mode = PACKED_444;
DC_FP_START();
dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
DC_FP_END();
wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
dwb_pipe++;
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
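/*
* Acquire a free post-blend 3D LUT (RMU): scan the pool, mark the first
* unacquired LUT as taken, and record in its state which RMU mux instance
* is used and which MPCC feeds it.
*/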
bool dcn30_acquire_post_bldn_3dlut(
struct resource_context *res_ctx,
const struct resource_pool *pool,
int mpcc_id,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper)
{
int i;
bool ret = false;
union dc_3dlut_state *state;
ASSERT(*lut == NULL && *shaper == NULL);
*lut = NULL;
*shaper = NULL;
for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
if (!res_ctx->is_mpc_3dlut_acquired[i]) {
*lut = pool->mpc_lut[i];
*shaper = pool->mpc_shaper[i];
state = &pool->mpc_lut[i]->state;
res_ctx->is_mpc_3dlut_acquired[i] = true;
state->bits.rmu_idx_valid = 1;
state->bits.rmu_mux_num = i;
if (state->bits.rmu_mux_num == 0)
state->bits.mpc_rmu0_mux = mpcc_id;
else if (state->bits.rmu_mux_num == 1)
state->bits.mpc_rmu1_mux = mpcc_id;
else if (state->bits.rmu_mux_num == 2)
state->bits.mpc_rmu2_mux = mpcc_id;
ret = true;
break;
}
}
return ret;
}
bool dcn30_release_post_bldn_3dlut(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper)
{
int i;
bool ret = false;
for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) {
res_ctx->is_mpc_3dlut_acquired[i] = false;
pool->mpc_lut[i]->state.raw = 0;
*lut = NULL;
*shaper = NULL;
ret = true;
break;
}
}
return ret;
}
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev))
return true;
return false;
}
static bool init_soc_bounding_box(struct dc *dc,
struct dcn30_resource_pool *pool)
{
struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
DC_LOGGER_INIT(dc->ctx->logger);
if (!is_soc_bounding_box_valid(dc)) {
DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
return false;
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
DC_FP_START();
patch_dcn30_soc_bounding_box(dc, &dcn3_0_soc);
DC_FP_END();
return true;
}
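/*
* Clone the primary pipe into a secondary pipe for a split. When odm is
* true the secondary pipe is linked through next/prev_odm_pipe, gets its
* own OPP when it is not under another pipe, and acquires a DSC if the
* stream uses DSC; otherwise it is stacked under the primary in the MPC
* tree via top/bottom_pipe for a plane (MPC) split.
*/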
static bool dcn30_split_stream_for_mpc_or_odm(
const struct dc *dc,
struct resource_context *res_ctx,
struct pipe_ctx *pri_pipe,
struct pipe_ctx *sec_pipe,
bool odm)
{
int pipe_idx = sec_pipe->pipe_idx;
const struct resource_pool *pool = dc->res_pool;
*sec_pipe = *pri_pipe;
sec_pipe->pipe_idx = pipe_idx;
sec_pipe->plane_res.mi = pool->mis[pipe_idx];
sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
sec_pipe->stream_res.dsc = NULL;
if (odm) {
if (pri_pipe->next_odm_pipe) {
ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
}
if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
}
if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
}
pri_pipe->next_odm_pipe = sec_pipe;
sec_pipe->prev_odm_pipe = pri_pipe;
if (!sec_pipe->top_pipe)
sec_pipe->stream_res.opp = pool->opps[pipe_idx];
else
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
return false;
}
} else {
if (pri_pipe->bottom_pipe) {
ASSERT(pri_pipe->bottom_pipe != sec_pipe);
sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
sec_pipe->bottom_pipe->top_pipe = sec_pipe;
}
pri_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = pri_pipe;
ASSERT(pri_pipe->plane_state);
}
return true;
}
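/*
* Pick a free pipe for a split: prefer the pipe index used in the old
* topology (old_index) if it is free, then any free pipe that was a head
* pipe in the current state, and finally any free pipe at all.
*/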
static struct pipe_ctx *dcn30_find_split_pipe(
struct dc *dc,
struct dc_state *context,
int old_index)
{
struct pipe_ctx *pipe = NULL;
int i;
if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[old_index];
pipe->pipe_idx = old_index;
}
if (!pipe)
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
&& dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
if (context->res_ctx.pipe_ctx[i].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
pipe->pipe_idx = i;
break;
}
}
}
/*
* May need to fix pipes getting tossed from 1 opp to another on flip
* Add for debugging transient underflow during topology updates:
* ASSERT(pipe);
*/
if (!pipe)
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (context->res_ctx.pipe_ctx[i].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
pipe->pipe_idx = i;
break;
}
}
return pipe;
}
noinline bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
bool fast_validate,
bool allow_self_refresh_only)
{
bool out = false;
bool repopulate_pipes = false;
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
ASSERT(pipes);
if (!pipes)
return false;
context->bw_ctx.dml.vba.maxMpcComb = 0;
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
if (!pipe_cnt) {
out = true;
goto validate_out;
}
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
if (!fast_validate || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
*/
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh_and_mclk_switch;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (vlevel < context->bw_ctx.dml.soc.num_states)
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
if (allow_self_refresh_only &&
(fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
* If mode is unsupported or there's still no p-state support
* then fall back to favoring voltage.
*
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
}
dml_log_mode_support_params(&context->bw_ctx.dml);
if (vlevel == context->bw_ctx.dml.soc.num_states)
goto validate_fail;
if (!dc->config.enable_windowed_mpo_odm) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
if (!pipe->stream)
continue;
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
&& memcmp(&mpo_pipe->plane_state->clip_rect,
&pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
}
pipe_idx++;
}
}
/* merge pipes if necessary */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/*skip pipes that don't need merging*/
if (!merge[i])
continue;
/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
if (pipe->prev_odm_pipe) {
/*split off odm pipe*/
pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
pipe->stream = NULL;
pipe->top_pipe = NULL;
pipe->prev_odm_pipe = NULL;
if (pipe->stream_res.dsc)
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
top_pipe->bottom_pipe = bottom_pipe;
if (bottom_pipe)
bottom_pipe->top_pipe = top_pipe;
pipe->top_pipe = NULL;
pipe->bottom_pipe = NULL;
pipe->plane_state = NULL;
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
}
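/* Split pass: for every pipe that DML asked to split, find a free
* secondary pipe (reusing the old topology's pipe index where possible)
* and build the MPC or ODM chain; split[i] == 4 then adds two further
* pipes for a 4:1 split.
*/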
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = NULL;
bool odm;
int old_index = -1;
if (!pipe->stream || newly_split[i])
continue;
pipe_idx++;
odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
if (!pipe->plane_state && !odm)
continue;
if (split[i]) {
if (odm) {
if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
else if (old_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->pipe_idx;
} else {
if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else if (old_pipe->bottom_pipe &&
old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->pipe_idx;
}
hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
goto validate_fail;
if (!dcn30_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe, odm))
goto validate_fail;
newly_split[hsplit_pipe->pipe_idx] = true;
repopulate_pipes = true;
}
if (split[i] == 4) {
struct pipe_ctx *pipe_4to1;
if (odm && old_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->pipe_idx;
else if (!odm && old_pipe->bottom_pipe &&
old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->pipe_idx;
else
old_index = -1;
pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
if (!dcn30_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, pipe_4to1, odm))
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
&& old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else
old_index = -1;
pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
if (!dcn30_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
hsplit_pipe, pipe_4to1, odm))
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
}
if (odm)
dcn20_build_mapped_resource(dc, context, pipe->stream);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state) {
if (!resource_build_scaling_params(pipe))
goto validate_fail;
}
}
/* Validate the actual DSC count against per-stream DSC requirements */
if (!dcn20_validate_dsc(dc, context)) {
vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
out = true;
goto validate_out;
validate_fail:
out = false;
validate_out:
return out;
}
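/*
* Refresh rate from the first stream's timing: pix_clk_100hz is the pixel
* clock in units of 100 Hz, so pix_clk_100hz * 100 is Hz; dividing by the
* pixels per frame (h_total * v_total) gives frames per second, and the
* final + 1 compensates for the integer division (rounds up).
*/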
static int get_refresh_rate(struct dc_state *context)
{
int refresh_rate = 0;
int h_v_total = 0;
struct dc_crtc_timing *timing = NULL;
if (context == NULL || context->streams[0] == NULL)
return 0;
/* compute the refresh rate from the first stream's timing */
timing = &context->streams[0]->timing;
if (timing == NULL)
return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
return 0;
refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
return refresh_rate;
}
#define MAX_STRETCHED_V_BLANK 500 // in micro-seconds
/*
* Scaling factor for v_blank stretch calculations considering timing in
* micro-seconds and pixel clock in 100hz.
* Note: the parenthesis are necessary to ensure the correct order of
* operation where V_SCALE is used.
*/
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
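/* V_SCALE = 10000 / 500 = 20; the 10000 folds together the microsecond
* and 100 Hz unit conversions (1e6 us per second / 100 Hz units).
*/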
static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
{
struct dc_crtc_timing *timing = NULL;
uint32_t sec_per_100_lines;
uint32_t max_v_blank;
uint32_t curr_v_blank;
uint32_t v_stretch_max;
uint32_t stretched_frame_pix_cnt;
uint32_t scaled_stretched_frame_pix_cnt;
uint32_t scaled_refresh_rate;
if (context == NULL || context->streams[0] == NULL)
return 0;
/* compute, from the first stream's timing, the frame rate at maximum v_blank stretch */
timing = &context->streams[0]->timing;
if (timing == NULL)
return 0;
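/* pix_clk_100hz / h_total is lines per second divided by 100, so dividing
* by V_SCALE (20) yields the number of lines that fit in
* MAX_STRETCHED_V_BLANK (500 us); the + 1 terms round up.
*/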
sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
max_v_blank = sec_per_100_lines / V_SCALE + 1;
curr_v_blank = timing->v_total - timing->v_addressable;
v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;
return scaled_refresh_rate;
}
static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
{
int refresh_rate_max_stretch_100hz;
int min_refresh_100hz;
if (context == NULL || context->streams[0] == NULL)
return false;
refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(context);
min_refresh_100hz = context->streams[0]->timing.min_refresh_in_uhz / 10000;
if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
return false;
return true;
}
bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
int refresh_rate = 0;
const int minimum_refreshrate_supported = 120;
if (context == NULL || context->streams[0] == NULL)
return false;
if (context->streams[0]->sink->edid_caps.panel_patch.disable_fams)
return false;
if (dc->debug.disable_fams)
return false;
if (!dc->caps.dmub_caps.mclk_sw)
return false;
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
return false;
/* more than one monitor connected */
if (context->stream_count != 1)
return false;
refresh_rate = get_refresh_rate(context);
if (refresh_rate < minimum_refreshrate_supported)
return false;
if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context))
return false;
if (!context->streams[0]->allow_freesync)
return false;
if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming)
return false;
context->streams[0]->fpo_in_use = true;
return true;
}
/*
* Set up FPO watermarks, p-state, and DRAM latency.
*/
void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
ASSERT(dc != NULL && context != NULL);
if (dc == NULL || context == NULL)
return;
/* Set wm_a.pstate so high natural MCLK switches are impossible: 4 seconds */
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
}
void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
DC_FP_START();
dcn30_fpu_update_soc_for_wm_a(dc, context);
DC_FP_END();
}
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
DC_FP_START();
dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
bool dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool out = false;
BW_VAL_TRACE_SETUP();
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
if (pipe_cnt == 0)
goto validate_out;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
DC_FP_START();
if (dc->res_pool->funcs->calculate_wm_and_dlg)
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
validate_fail:
DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
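/*
* Rebuild the DML clock state table from the SMU-provided clock table:
* gather the max clocks, merge the fixed DCFCLK STA targets with the
* optimal DCFCLK computed for each UCLK DPM level, and hand the resulting
* dcfclk/dram-speed pairs to the FPU helper. dram_speed_mts is derived as
* memclk_mhz * 16, as used throughout this function.
*/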
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
struct dc_bounding_box_max_clk dcn30_bb_max_clk;
memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
DC_FP_START();
dcn30_fpu_update_dram_channel_width_bytes(dc);
DC_FP_END();
if (bw_params->clk_table.entries[0].memclk_mhz) {
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
DC_FP_START();
dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
DC_FP_END();
if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
break;
}
}
// Update size of array since we "removed" duplicates
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
}
// Calculate optimal uclk for each dcfclk sta target
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_0_soc.num_states = num_states;
DC_FP_START();
dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
DC_FP_END();
}
}
static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static const struct resource_funcs dcn30_res_pool_funcs = {
.destroy = dcn30_destroy_resource_pool,
.link_enc_create = dcn30_link_encoder_create,
.panel_cntl_create = dcn30_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
};
#define CTX ctx
#define REG(reg_name) \
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
/* Support for max 6 pipes */
value = value & 0x3f;
return value;
}
static bool dcn30_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn30_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
struct ddc_service_init_data ddc_init_data = {0};
uint32_t pipe_fuses = read_pipe_fuses(ctx);
uint32_t num_pipes = 0;
if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
BREAK_TO_DEBUGGER();
dm_error("DC: Unexpected fuse recipe for navi2x !\n");
/* fault to single pipe */
pipe_fuses = 0x3e;
}
DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn3;
pool->base.funcs = &dcn30_res_pool_funcs;
/*************************************************
*  Resource + asic cap hardcoding               *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 8;
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 2;
dc->caps.max_slave_yuv_planes = 2;
dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
dc->caps.color.dpp.dgam_rom_caps.pq = 1;
dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
dc->caps.color.dpp.post_csc = 1;
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN3
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 1;
dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
uint8_t is_vbios_lttpr_enable = 0;
bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
}
if (ctx->dc_bios->funcs->get_lttpr_interop) {
enum bp_result bp_query_result;
uint8_t is_vbios_interop_enabled = 0;
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
&is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
}
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN30_CLK_SRC_PLL0] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN30_CLK_SRC_PLL1] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN30_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN30_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN30_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clock_sources[DCN30_CLK_SRC_PLL5] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL5,
&clk_src_regs[5], false);
pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
/* TODO: do not reuse phy_pll registers */
pool->base.dp_clock_source =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* DCCG */
pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* PP Lib and SMU interfaces */
init_soc_bounding_box(dc, pool);
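/* Each set bit in pipe_fuses marks a harvested pipe; shrink the DPP and
* OTG counts in the IP params accordingly before DML is initialized.
*/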
num_pipes = dcn3_0_ip.max_num_dpp;
for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
if (pipe_fuses & 1 << i)
num_pipes--;
dcn3_0_ip.max_num_dpp = num_pipes;
dcn3_0_ip.max_num_otg = num_pipes;
dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
/* IRQ */
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn30_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
/* HUBBUB */
pool->base.hubbub = dcn30_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
/* HUBPs, DPPs, OPPs and TGs */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn30_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create hubps!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn30_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn30_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn30_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
/* PSR */
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
dm_error("DC: failed to create PSR obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
&abm_regs[i],
&abm_shift,
&abm_mask);
if (pool->base.multiple_abms[i] == NULL) {
dm_error("DC: failed to create abm for pipe %d!\n", i);
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* MPC and DSC */
pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn30_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
/* DWB and MMHUBBUB */
if (!dcn30_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn30_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
/* AUX and I2C */
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn30_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
/* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
dcn30_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
ddc_init_data.ctx = dc->ctx;
ddc_init_data.link = NULL;
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
DC_FP_END();
return true;
create_fail:
DC_FP_END();
dcn30_resource_destruct(pool);
return false;
}
struct resource_pool *dcn30_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn30_resource_pool *pool =
kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
/* #include "dcn3ag/dcn3ag_phy_fw.h" */
#include "gpio_service_interface.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger
#define REG(reg)\
(enc10->link_regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name
#define IND_REG(index) \
(enc10->link_regs->index)
bool dcn30_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream)
{
return dcn10_link_encoder_validate_output_with_stream(enc, stream);
}
static const struct link_encoder_funcs dcn30_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn30_link_encoder_validate_output_with_stream,
.hw_init = enc3_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.destroy = dcn10_link_encoder_destroy,
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn30_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
struct dcn10_link_encoder *enc10 = &enc20->enc10;
enc10->base.funcs = &dcn30_link_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether the driver polls the I2C data pin
* while doing the DP sink detect
*/
/* if (dal_adapter_service_is_feature_supported(as,
FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
enc10->base.features.flags.bits.
DP_SINK_DETECT_POLL_DATA_PIN = true;*/
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
* preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
* and LVDS. The preferred DIG assignment is decided by board design.
* For DCE 8.0 there are at most 6 UNIPHYs; we assume board design and
* VBIOS will filter out the 7th UNIPHY for DCE 8.0.
* Because of this, adding DIGG should not hurt DCE 8.0, and it lets
* DCE 8.1 share the DCE 8.0 code as much as possible.
*/
enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;
switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
case TRANSMITTER_UNIPHY_C:
enc10->base.preferred_engine = ENGINE_ID_DIGC;
break;
case TRANSMITTER_UNIPHY_D:
enc10->base.preferred_engine = ENGINE_ID_DIGD;
break;
case TRANSMITTER_UNIPHY_E:
enc10->base.preferred_engine = ENGINE_ID_DIGE;
break;
case TRANSMITTER_UNIPHY_F:
enc10->base.preferred_engine = ENGINE_ID_DIGF;
break;
case TRANSMITTER_UNIPHY_G:
enc10->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
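/*
 * Minimal usage sketch (not part of the original file): a resource create
 * function would typically allocate the encoder and call the constructor with
 * its per-ASIC register/shift/mask tables. The helper name, the table names
 * (link_enc_feature, link_enc_regs, link_enc_aux_regs, link_enc_hpd_regs,
 * le_shift, le_mask) and the array indexing are hypothetical placeholders.
 */
#if 0
static struct link_encoder *example_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 = kzalloc(sizeof(*enc20), GFP_KERNEL);

if (!enc20)
return NULL;

dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift, &le_mask);

return &enc20->enc10.base;
}
#endif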
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
#define AUX_REG_READ(reg_name) \
dm_read_reg(CTX, AUX_REG(reg_name))
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
void enc3_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
*/
/*
AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
AUX_RX_START_WINDOW = 1 [6:4]
AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
//AUX_DPHY_TX_REF_CONTROL.AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
// 27MHz -> 0xd
// 100MHz -> 0x32
// 48MHz -> 0x18
// Set TMDS_CTL0 to 1. This is a legacy setting.
REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
dcn10_aux_initialize(enc10);
}
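/*
 * Illustrative sketch (not original driver code): the AUX_TX_REF_DIV values
 * listed above are consistent with dividing the reference clock down to a
 * roughly 2 MHz AUX reference, i.e. divider ~= refclk_mhz / 2
 * (27 MHz -> 0xd, 48 MHz -> 0x18, 100 MHz -> 0x32). The helper below is a
 * hypothetical illustration of that relationship and is not used anywhere.
 */
#if 0
static inline unsigned int example_aux_tx_ref_div(unsigned int refclk_khz)
{
/* divide refclk down to a ~2 MHz reference: 27000 kHz -> 13 (0xd) */
return refclk_khz / 2000;
}
#endif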
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "resource.h"
#include "mcif_wb.h"
#include "dcn30_mmhubbub.h"
#define REG(reg)\
mcif_wb30->mcif_wb_regs->reg
#define CTX \
mcif_wb30->base.ctx
#undef FN
#define FN(reg_name, field_name) \
mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name
#define MCIF_ADDR(addr) ((((unsigned long long)(addr) & 0xffffffffff) + 0xFE) >> 8)
#define MCIF_ADDR_HIGH(addr) ((unsigned long long)(addr) >> 40)
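/*
 * Illustrative note (an interpretation of the macros above, not original
 * driver text): MCIF_ADDR programs the buffer address in 256-byte units
 * (address bits [39:8]), with the +0xFE biasing unaligned addresses up
 * toward the next 256-byte unit, while MCIF_ADDR_HIGH programs the bits
 * above bit 39. For example, for addr = 0x1_2345_6700:
 * MCIF_ADDR(addr) = 0x1234567 and MCIF_ADDR_HIGH(addr) = 0x0.
 */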
/* wbif programming guide:
* 1. set up the wbif parameters:
* unsigned long long luma_address[4]; //4 frame buffers
* unsigned long long chroma_address[4];
* unsigned int luma_pitch;
* unsigned int chroma_pitch;
* unsigned int warmup_pitch=0x10; //256B aligned; a value of 0x10 corresponds to a 4KB page size
* unsigned int slice_lines; //slice size
* unsigned int time_per_pixel; // time per pixel, in ns
* unsigned int arbitration_slice; // 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes
* unsigned int max_scaled_time; // used for QOS generation
* unsigned int swlock=0x0;
* unsigned int cli_watermark[4]; //4 group urgent watermark
* unsigned int pstate_watermark[4]; //4 group pstate watermark
* unsigned int sw_int_en; // Software interrupt enable, frame end and overflow
* unsigned int sw_slice_int_en; // slice end interrupt enable
* unsigned int sw_overrun_int_en; // overrun error interrupt enable
* unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow
* unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow
*
* 2. configure the wbif registers
* a. call mmhubbub_config_wbif()
*
* 3. Enable wbif
* call set_wbif_bufmgr_enable();
*
* 4. wbif_dump_status(), optional, for debug purposes:
* the bufmgr status shows the progress of the writeback and can be used for debugging
*/
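/*
 * Minimal sketch of the sequence above (not original driver code). It uses
 * the dcn30_mmhubbub_funcs table defined later in this file; the helper name
 * and the idea that the caller has already computed the buffer/arbitration
 * parameters are assumptions for illustration only.
 */
#if 0
static void example_wbif_setup(struct mcif_wb *mcif_wb,
struct mcif_buf_params *buf_params,
struct mcif_arb_params *arb_params,
unsigned int dest_height)
{
/* step 2: program buffer addresses/sizes and arbitration/watermarks */
mcif_wb->funcs->config_mcif_buf(mcif_wb, buf_params, dest_height);
mcif_wb->funcs->config_mcif_arb(mcif_wb, arb_params);

/* step 3: enable the writeback buffer manager */
mcif_wb->funcs->enable_mcif(mcif_wb);
}
#endif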
static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,
struct mcif_warmup_params *params)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
union large_integer start_address_shift = {.quad_part = params->start_address.quad_part >> 5};
/* Set base address and region size for warmup */
REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, 0, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, start_address_shift.high_part);
REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_LOW, 0, MMHUBBUB_WARMUP_BASE_ADDR_LOW, start_address_shift.low_part);
REG_SET(MMHUBBUB_WARMUP_ADDR_REGION, 0, MMHUBBUB_WARMUP_ADDR_REGION, params->region_size >> 5);
// REG_SET(MMHUBBUB_WARMUP_P_VMID, 0, MMHUBBUB_WARMUP_P_VMID, params->p_vmid);
/* Set address increment and enable warmup */
REG_SET_3(MMHUBBUB_WARMUP_CONTROL_STATUS, 0, MMHUBBUB_WARMUP_EN, true,
MMHUBBUB_WARMUP_SW_INT_EN, true,
MMHUBBUB_WARMUP_INC_ADDR, params->address_increment >> 5);
/* Wait for an interrupt to signal warmup is completed */
REG_WAIT(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, 1, 20, 100);
/* Acknowledge interrupt */
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, 1);
/* Disable warmup */
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
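/*
 * Illustrative note (inferred from the shifts above, not original driver
 * text): the warmup base address, region size and address increment are all
 * programmed in 32-byte units (value >> 5), e.g. a 1 MiB region is written
 * as 0x100000 >> 5 = 0x8000.
 */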
static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0]));
REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0]));
REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1]));
REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1]));
REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2]));
REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2]));
REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3]));
REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3]));
REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3]));
/* setup luma & chroma size
* should be enough to contain a whole frame Luma data,
* the programmed value is frame buffer size [27:8], 256-byte aligned
*/
REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height);
REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height);
/* enable address fence */
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1);
/* set up pitch; the programmed value is pitch bits [15:8], i.e. 256-byte aligned */
REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8,
MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8);
}
static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
struct mcif_arb_params *params)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
/* Programmed by the video driver based on the CRTC timing (for DWB) */
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel);
/* Programming dwb watermark */
/* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */
/* Program in ns. A formula will be provided in the pseudo code to calculate the value. */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x0);
/* urgent_watermarkA */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x1);
/* urgent_watermarkB */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x2);
/* urgent_watermarkC */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x3);
/* urgent_watermarkD */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]);
/* Programming nb pstate watermark */
/* nbp_state_change_watermarkA */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]);
/* nbp_state_change_watermarkB */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]);
/* nbp_state_change_watermarkC */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]);
/* nbp_state_change_watermarkD */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]);
/* dram_speed_change_duration */
REG_UPDATE(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI,
MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, params->dram_speed_change_duration);
/* max_scaled_time */
REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time);
/* slice_lines */
REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1);
/* Set arbitration unit for Luma/Chroma */
/* arb_unit=2 should be chosen for better efficiency */
/* Arbitration size, 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes */
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}
static const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
.warmup_mcif = mmhubbub3_warmup_mcif,
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
.config_mcif_buf = mmhubbub3_config_mcif_buf,
.config_mcif_arb = mmhubbub3_config_mcif_arb,
.config_mcif_irq = mmhubbub2_config_mcif_irq,
.dump_frame = mcifwb2_dump_frame,
};
void dcn30_mmhubbub_construct(struct dcn30_mmhubbub *mcif_wb30,
struct dc_context *ctx,
const struct dcn30_mmhubbub_registers *mcif_wb_regs,
const struct dcn30_mmhubbub_shift *mcif_wb_shift,
const struct dcn30_mmhubbub_mask *mcif_wb_mask,
int inst)
{
mcif_wb30->base.ctx = ctx;
mcif_wb30->base.inst = inst;
mcif_wb30->base.funcs = &dcn30_mmhubbub_funcs;
mcif_wb30->mcif_wb_regs = mcif_wb_regs;
mcif_wb30->mcif_wb_shift = mcif_wb_shift;
mcif_wb30->mcif_wb_mask = mcif_wb_mask;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_lib.h"
#include "dc_features.h"
#include "dcn20/display_mode_vba_20.h"
#include "dcn20/display_rq_dlg_calc_20.h"
#include "dcn20/display_mode_vba_20v2.h"
#include "dcn20/display_rq_dlg_calc_20v2.h"
#include "dcn21/display_mode_vba_21.h"
#include "dcn21/display_rq_dlg_calc_21.h"
#include "dcn30/display_mode_vba_30.h"
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
#include "dcn314/display_mode_vba_314.h"
#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
static const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20_recalculate,
.rq_dlg_get_dlg_reg = dml20_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml20v2_funcs = {
.validate = dml20v2_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20v2_recalculate,
.rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml21_funcs = {
.validate = dml21_ModeSupportAndSystemConfigurationFull,
.recalculate = dml21_recalculate,
.rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml30_funcs = {
.validate = dml30_ModeSupportAndSystemConfigurationFull,
.recalculate = dml30_recalculate,
.rq_dlg_get_dlg_reg = dml30_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml30_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml31_funcs = {
.validate = dml31_ModeSupportAndSystemConfigurationFull,
.recalculate = dml31_recalculate,
.rq_dlg_get_dlg_reg = dml31_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml314_funcs = {
.validate = dml314_ModeSupportAndSystemConfigurationFull,
.recalculate = dml314_recalculate,
.rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
};
static const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
.rq_dlg_get_dlg_reg_v2 = dml32_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg_v2 = dml32_rq_dlg_get_rq_reg
};
void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
const struct _vcs_dpi_ip_params_st *ip_params,
enum dml_project project)
{
lib->soc = *soc_bb;
lib->ip = *ip_params;
lib->project = project;
switch (project) {
case DML_PROJECT_NAVI10:
case DML_PROJECT_DCN201:
lib->funcs = dml20_funcs;
break;
case DML_PROJECT_NAVI10v2:
lib->funcs = dml20v2_funcs;
break;
case DML_PROJECT_DCN21:
lib->funcs = dml21_funcs;
break;
case DML_PROJECT_DCN30:
lib->funcs = dml30_funcs;
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
case DML_PROJECT_DCN314:
lib->funcs = dml314_funcs;
break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
default:
break;
}
}
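/*
 * Minimal usage sketch (hypothetical caller; in the driver this is normally
 * done by the per-ASIC resource code with its soc/ip bounding-box tables):
 */
#if 0
static void example_dml_setup(struct display_mode_lib *dml,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
const struct _vcs_dpi_ip_params_st *ip_params)
{
/* copies the soc/ip parameters and binds the DCN3.0 function table */
dml_init_instance(dml, soc_bb, ip_params, DML_PROJECT_DCN30);
}
#endif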
const char *dml_get_status_message(enum dm_validation_status status)
{
switch (status) {
case DML_VALIDATION_OK: return "Validation OK";
case DML_FAIL_SCALE_RATIO_TAP: return "Scale ratio/tap";
case DML_FAIL_SOURCE_PIXEL_FORMAT: return "Source pixel format";
case DML_FAIL_VIEWPORT_SIZE: return "Viewport size";
case DML_FAIL_TOTAL_V_ACTIVE_BW: return "Total vertical active bandwidth";
case DML_FAIL_DIO_SUPPORT: return "DIO support";
case DML_FAIL_NOT_ENOUGH_DSC: return "Not enough DSC Units";
case DML_FAIL_DSC_CLK_REQUIRED: return "DSC clock required";
case DML_FAIL_URGENT_LATENCY: return "Urgent latency";
case DML_FAIL_REORDERING_BUFFER: return "Re-ordering buffer";
case DML_FAIL_DISPCLK_DPPCLK: return "Dispclk and Dppclk";
case DML_FAIL_TOTAL_AVAILABLE_PIPES: return "Total available pipes";
case DML_FAIL_NUM_OTG: return "Number of OTG";
case DML_FAIL_WRITEBACK_MODE: return "Writeback mode";
case DML_FAIL_WRITEBACK_LATENCY: return "Writeback latency";
case DML_FAIL_WRITEBACK_SCALE_RATIO_TAP: return "Writeback scale ratio/tap";
case DML_FAIL_CURSOR_SUPPORT: return "Cursor support";
case DML_FAIL_PITCH_SUPPORT: return "Pitch support";
case DML_FAIL_PTE_BUFFER_SIZE: return "PTE buffer size";
case DML_FAIL_DSC_INPUT_BPC: return "DSC input bpc";
case DML_FAIL_PREFETCH_SUPPORT: return "Prefetch support";
case DML_FAIL_V_RATIO_PREFETCH: return "Vertical ratio prefetch";
default: return "Unknown Status";
}
}
}
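/*
 * Usage sketch (hypothetical): callers can turn a validation status into a
 * readable log line, e.g.
 * dml_print("DML: validation failed: %s\n", dml_get_status_message(status));
 */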
void dml_log_pipe_params(
struct display_mode_lib *mode_lib,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
display_pipe_source_params_st *pipe_src;
display_pipe_dest_params_st *pipe_dest;
scaler_ratio_depth_st *scale_ratio_depth;
scaler_taps_st *scale_taps;
display_output_params_st *dout;
display_clocks_and_cfg_st *clks_cfg;
int i;
for (i = 0; i < pipe_cnt; i++) {
pipe_src = &(pipes[i].pipe.src);
pipe_dest = &(pipes[i].pipe.dest);
scale_ratio_depth = &(pipes[i].pipe.scale_ratio_depth);
scale_taps = &(pipes[i].pipe.scale_taps);
dout = &(pipes[i].dout);
clks_cfg = &(pipes[i].clks_cfg);
dml_print("DML PARAMS: =====================================\n");
dml_print("DML PARAMS: PIPE [%d] SOURCE PARAMS:\n", i);
dml_print("DML PARAMS: source_format = %d\n", pipe_src->source_format);
dml_print("DML PARAMS: dcc = %d\n", pipe_src->dcc);
dml_print("DML PARAMS: dcc_rate = %d\n", pipe_src->dcc_rate);
dml_print("DML PARAMS: dcc_use_global = %d\n", pipe_src->dcc_use_global);
dml_print("DML PARAMS: vm = %d\n", pipe_src->vm);
dml_print("DML PARAMS: gpuvm = %d\n", pipe_src->gpuvm);
dml_print("DML PARAMS: hostvm = %d\n", pipe_src->hostvm);
dml_print("DML PARAMS: gpuvm_levels_force_en = %d\n", pipe_src->gpuvm_levels_force_en);
dml_print("DML PARAMS: gpuvm_levels_force = %d\n", pipe_src->gpuvm_levels_force);
dml_print("DML PARAMS: source_scan = %d\n", pipe_src->source_scan);
dml_print("DML PARAMS: sw_mode = %d\n", pipe_src->sw_mode);
dml_print("DML PARAMS: macro_tile_size = %d\n", pipe_src->macro_tile_size);
dml_print("DML PARAMS: viewport_width = %d\n", pipe_src->viewport_width);
dml_print("DML PARAMS: viewport_height = %d\n", pipe_src->viewport_height);
dml_print("DML PARAMS: viewport_y_y = %d\n", pipe_src->viewport_y_y);
dml_print("DML PARAMS: viewport_y_c = %d\n", pipe_src->viewport_y_c);
dml_print("DML PARAMS: viewport_width_c = %d\n", pipe_src->viewport_width_c);
dml_print("DML PARAMS: viewport_height_c = %d\n", pipe_src->viewport_height_c);
dml_print("DML PARAMS: data_pitch = %d\n", pipe_src->data_pitch);
dml_print("DML PARAMS: data_pitch_c = %d\n", pipe_src->data_pitch_c);
dml_print("DML PARAMS: meta_pitch = %d\n", pipe_src->meta_pitch);
dml_print("DML PARAMS: meta_pitch_c = %d\n", pipe_src->meta_pitch_c);
dml_print("DML PARAMS: cur0_src_width = %d\n", pipe_src->cur0_src_width);
dml_print("DML PARAMS: cur0_bpp = %d\n", pipe_src->cur0_bpp);
dml_print("DML PARAMS: cur1_src_width = %d\n", pipe_src->cur1_src_width);
dml_print("DML PARAMS: cur1_bpp = %d\n", pipe_src->cur1_bpp);
dml_print("DML PARAMS: num_cursors = %d\n", pipe_src->num_cursors);
dml_print("DML PARAMS: is_hsplit = %d\n", pipe_src->is_hsplit);
dml_print("DML PARAMS: hsplit_grp = %d\n", pipe_src->hsplit_grp);
dml_print("DML PARAMS: dynamic_metadata_enable = %d\n", pipe_src->dynamic_metadata_enable);
dml_print("DML PARAMS: dmdata_lines_before_active = %d\n", pipe_src->dynamic_metadata_lines_before_active);
dml_print("DML PARAMS: dmdata_xmit_bytes = %d\n", pipe_src->dynamic_metadata_xmit_bytes);
dml_print("DML PARAMS: immediate_flip = %d\n", pipe_src->immediate_flip);
dml_print("DML PARAMS: v_total_min = %d\n", pipe_src->v_total_min);
dml_print("DML PARAMS: v_total_max = %d\n", pipe_src->v_total_max);
dml_print("DML PARAMS: =====================================\n");
dml_print("DML PARAMS: PIPE [%d] DESTINATION PARAMS:\n", i);
dml_print("DML PARAMS: recout_width = %d\n", pipe_dest->recout_width);
dml_print("DML PARAMS: recout_height = %d\n", pipe_dest->recout_height);
dml_print("DML PARAMS: full_recout_width = %d\n", pipe_dest->full_recout_width);
dml_print("DML PARAMS: full_recout_height = %d\n", pipe_dest->full_recout_height);
dml_print("DML PARAMS: hblank_start = %d\n", pipe_dest->hblank_start);
dml_print("DML PARAMS: hblank_end = %d\n", pipe_dest->hblank_end);
dml_print("DML PARAMS: vblank_start = %d\n", pipe_dest->vblank_start);
dml_print("DML PARAMS: vblank_end = %d\n", pipe_dest->vblank_end);
dml_print("DML PARAMS: htotal = %d\n", pipe_dest->htotal);
dml_print("DML PARAMS: vtotal = %d\n", pipe_dest->vtotal);
dml_print("DML PARAMS: vactive = %d\n", pipe_dest->vactive);
dml_print("DML PARAMS: hactive = %d\n", pipe_dest->hactive);
dml_print("DML PARAMS: vstartup_start = %d\n", pipe_dest->vstartup_start);
dml_print("DML PARAMS: vupdate_offset = %d\n", pipe_dest->vupdate_offset);
dml_print("DML PARAMS: vupdate_width = %d\n", pipe_dest->vupdate_width);
dml_print("DML PARAMS: vready_offset = %d\n", pipe_dest->vready_offset);
dml_print("DML PARAMS: interlaced = %d\n", pipe_dest->interlaced);
dml_print("DML PARAMS: pixel_rate_mhz = %3.2f\n", pipe_dest->pixel_rate_mhz);
dml_print("DML PARAMS: sync_vblank_all_planes = %d\n", pipe_dest->synchronized_vblank_all_planes);
dml_print("DML PARAMS: otg_inst = %d\n", pipe_dest->otg_inst);
dml_print("DML PARAMS: odm_combine = %d\n", pipe_dest->odm_combine);
dml_print("DML PARAMS: use_maximum_vstartup = %d\n", pipe_dest->use_maximum_vstartup);
dml_print("DML PARAMS: vtotal_max = %d\n", pipe_dest->vtotal_max);
dml_print("DML PARAMS: vtotal_min = %d\n", pipe_dest->vtotal_min);
dml_print("DML PARAMS: =====================================\n");
dml_print("DML PARAMS: PIPE [%d] SCALER PARAMS:\n", i);
dml_print("DML PARAMS: hscl_ratio = %3.4f\n", scale_ratio_depth->hscl_ratio);
dml_print("DML PARAMS: vscl_ratio = %3.4f\n", scale_ratio_depth->vscl_ratio);
dml_print("DML PARAMS: hscl_ratio_c = %3.4f\n", scale_ratio_depth->hscl_ratio_c);
dml_print("DML PARAMS: vscl_ratio_c = %3.4f\n", scale_ratio_depth->vscl_ratio_c);
dml_print("DML PARAMS: vinit = %3.4f\n", scale_ratio_depth->vinit);
dml_print("DML PARAMS: vinit_c = %3.4f\n", scale_ratio_depth->vinit_c);
dml_print("DML PARAMS: vinit_bot = %3.4f\n", scale_ratio_depth->vinit_bot);
dml_print("DML PARAMS: vinit_bot_c = %3.4f\n", scale_ratio_depth->vinit_bot_c);
dml_print("DML PARAMS: lb_depth = %d\n", scale_ratio_depth->lb_depth);
dml_print("DML PARAMS: scl_enable = %d\n", scale_ratio_depth->scl_enable);
dml_print("DML PARAMS: htaps = %d\n", scale_taps->htaps);
dml_print("DML PARAMS: vtaps = %d\n", scale_taps->vtaps);
dml_print("DML PARAMS: htaps_c = %d\n", scale_taps->htaps_c);
dml_print("DML PARAMS: vtaps_c = %d\n", scale_taps->vtaps_c);
dml_print("DML PARAMS: =====================================\n");
dml_print("DML PARAMS: PIPE [%d] DISPLAY OUTPUT PARAMS:\n", i);
dml_print("DML PARAMS: output_type = %d\n", dout->output_type);
dml_print("DML PARAMS: output_format = %d\n", dout->output_format);
dml_print("DML PARAMS: dsc_input_bpc = %d\n", dout->dsc_input_bpc);
dml_print("DML PARAMS: output_bpp = %3.4f\n", dout->output_bpp);
dml_print("DML PARAMS: dp_lanes = %d\n", dout->dp_lanes);
dml_print("DML PARAMS: dsc_enable = %d\n", dout->dsc_enable);
dml_print("DML PARAMS: dsc_slices = %d\n", dout->dsc_slices);
dml_print("DML PARAMS: wb_enable = %d\n", dout->wb_enable);
dml_print("DML PARAMS: num_active_wb = %d\n", dout->num_active_wb);
dml_print("DML PARAMS: =====================================\n");
dml_print("DML PARAMS: PIPE [%d] CLOCK CONFIG PARAMS:\n", i);
dml_print("DML PARAMS: voltage = %d\n", clks_cfg->voltage);
dml_print("DML PARAMS: dppclk_mhz = %3.2f\n", clks_cfg->dppclk_mhz);
dml_print("DML PARAMS: refclk_mhz = %3.2f\n", clks_cfg->refclk_mhz);
dml_print("DML PARAMS: dispclk_mhz = %3.2f\n", clks_cfg->dispclk_mhz);
dml_print("DML PARAMS: dcfclk_mhz = %3.2f\n", clks_cfg->dcfclk_mhz);
dml_print("DML PARAMS: socclk_mhz = %3.2f\n", clks_cfg->socclk_mhz);
dml_print("DML PARAMS: =====================================\n");
}
}
void dml_log_mode_support_params(struct display_mode_lib *mode_lib)
{
int i;
for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
dml_print("DML SUPPORT: ===============================================\n");
dml_print("DML SUPPORT: Voltage State %d\n", i);
dml_print("DML SUPPORT: Mode Supported : %s\n", mode_lib->vba.ModeSupport[i][0] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Mode Supported (pipe split) : %s\n", mode_lib->vba.ModeSupport[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Scale Ratio And Taps : %s\n", mode_lib->vba.ScaleRatioAndTapsSupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Source Format Pixel And Scan : %s\n", mode_lib->vba.SourceFormatPixelAndScanSupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Viewport Size : [%s, %s]\n", mode_lib->vba.ViewportSizeSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.ViewportSizeSupport[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: DIO Support : %s\n", mode_lib->vba.DIOSupport[i] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: ODM Combine 4To1 Support Check : %s\n", mode_lib->vba.ODMCombine4To1SupportCheckOK[i] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: DSC Units : %s\n", mode_lib->vba.NotEnoughDSCUnits[i] ? "Not Supported" : "Supported");
dml_print("DML SUPPORT: DSCCLK Required : %s\n", mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] ? "Not Supported" : "Supported");
dml_print("DML SUPPORT: DTBCLK Required : %s\n", mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] ? "Not Supported" : "Supported");
dml_print("DML SUPPORT: Re-ordering Buffer : [%s, %s]\n", mode_lib->vba.ROBSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.ROBSupport[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: DISPCLK and DPPCLK : [%s, %s]\n", mode_lib->vba.DISPCLK_DPPCLK_Support[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.DISPCLK_DPPCLK_Support[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Total Available Pipes : [%s, %s]\n", mode_lib->vba.TotalAvailablePipesSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.TotalAvailablePipesSupport[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Writeback Latency : %s\n", mode_lib->vba.WritebackLatencySupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Writeback Scale Ratio And Taps : %s\n", mode_lib->vba.WritebackScaleRatioAndTapsSupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Cursor : %s\n", mode_lib->vba.CursorSupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Pitch : %s\n", mode_lib->vba.PitchSupport ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Prefetch : [%s, %s]\n", mode_lib->vba.PrefetchSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.PrefetchSupported[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Dynamic Metadata : [%s, %s]\n", mode_lib->vba.DynamicMetadataSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.DynamicMetadataSupported[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: Total Vertical Active Bandwidth : [%s, %s]\n", mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: VRatio In Prefetch : [%s, %s]\n", mode_lib->vba.VRatioInPrefetchSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.VRatioInPrefetchSupported[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: PTE Buffer Size Not Exceeded : [%s, %s]\n", mode_lib->vba.PTEBufferSizeNotExceeded[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.PTEBufferSizeNotExceeded[i][1] ? "Supported" : "NOT Supported");
dml_print("DML SUPPORT: DSC Input BPC : %s\n", mode_lib->vba.NonsupportedDSCInputBPC ? "Not Supported" : "Supported");
dml_print("DML SUPPORT: HostVMEnable : %d\n", mode_lib->vba.HostVMEnable);
dml_print("DML SUPPORT: ImmediateFlipSupportedForState : [%d, %d]\n", mode_lib->vba.ImmediateFlipSupportedForState[i][0], mode_lib->vba.ImmediateFlipSupportedForState[i][1]);
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_rq_dlg_helpers.h"
#include "dml_logger.h"
void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param)
{
dml_print("DML_RQ_DLG_CALC: ***************************\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_c);
dml_print("DML_RQ_DLG_CALC: ***************************\n");
}
void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes);
dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing->min_chunk_bytes);
dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing->meta_chunk_bytes);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
rq_sizing->min_meta_chunk_bytes);
dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing->mpte_group_bytes);
dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing->dpte_group_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
rq_dlg_param->swath_width_ub);
dml_print(
"DML_RQ_DLG_CALC: swath_height = %0d\n",
rq_dlg_param->swath_height);
dml_print(
"DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
rq_dlg_param->req_per_swath_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
rq_dlg_param->meta_pte_bytes_per_frame_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
rq_dlg_param->dpte_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
rq_dlg_param->dpte_groups_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
rq_dlg_param->dpte_row_height);
dml_print(
"DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
rq_dlg_param->dpte_bytes_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
rq_dlg_param->meta_chunks_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
rq_dlg_param->meta_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_row_height = %0d\n",
rq_dlg_param->meta_row_height);
dml_print(
"DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
rq_dlg_param->meta_bytes_per_row_ub);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
rq_misc_param->full_swath_bytes);
dml_print(
"DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
rq_misc_param->stored_swath_bytes);
dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param->blk256_width);
dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param->blk256_height);
dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param->req_width);
dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param->req_height);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us);
dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us);
dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
dlg_sys_param->total_flip_bw);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
dlg_sys_param->total_flip_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size);
dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs->min_chunk_size);
dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs->meta_chunk_size);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
rq_regs->min_meta_chunk_size);
dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs->dpte_group_size);
dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs->mpte_group_size);
dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs->swath_height);
dml_print(
"DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
rq_regs->pte_row_height_linear);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_c);
dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
dlg_regs->refcyc_h_blank_end);
dml_print(
"DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
dlg_regs->dlg_vblank_end);
dml_print(
"DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
dlg_regs->min_dst_y_next_start);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
dlg_regs->refcyc_per_htotal);
dml_print(
"DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
dlg_regs->refcyc_x_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
dlg_regs->dst_y_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
dlg_regs->dst_y_prefetch);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
dlg_regs->dst_y_per_vm_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
dlg_regs->dst_y_per_row_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
dlg_regs->dst_y_per_vm_flip);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
dlg_regs->dst_y_per_row_flip);
dml_print(
"DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
dlg_regs->ref_freq_to_pix_freq);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
dlg_regs->vratio_prefetch);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
dlg_regs->vratio_prefetch_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_flip_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_flip_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
dlg_regs->dst_y_per_pte_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
dlg_regs->dst_y_per_pte_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
dlg_regs->refcyc_per_pte_group_nom_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
dlg_regs->dst_y_per_meta_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
dlg_regs->dst_y_per_meta_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
dlg_regs->refcyc_per_meta_chunk_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
dlg_regs->refcyc_per_line_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
dlg_regs->refcyc_per_line_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
dlg_regs->refcyc_per_line_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
dlg_regs->refcyc_per_line_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
dlg_regs->chunk_hdl_adjust_cur0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
dlg_regs->dst_y_offset_cur1);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
dlg_regs->chunk_hdl_adjust_cur1);
dml_print(
"DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
dlg_regs->vready_after_vcount0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
dlg_regs->dst_y_delta_drq_limit);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
dlg_regs->xfc_reg_transfer_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
dlg_regs->xfc_reg_precharge_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
dlg_regs->xfc_reg_remote_surface_flip_latency);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
dlg_regs->refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
ttu_regs->qos_level_low_wm);
dml_print(
"DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
ttu_regs->qos_level_high_wm);
dml_print(
"DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
ttu_regs->min_ttu_vblank);
dml_print(
"DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
ttu_regs->qos_level_flip);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_pre_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_cur1);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
ttu_regs->refcyc_per_req_delivery_pre_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
ttu_regs->qos_level_fixed_l);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
ttu_regs->qos_ramp_disable_l);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
ttu_regs->qos_level_fixed_c);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
ttu_regs->qos_ramp_disable_c);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
ttu_regs->qos_level_fixed_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
ttu_regs->qos_ramp_disable_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
ttu_regs->qos_level_fixed_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
ttu_regs->qos_ramp_disable_cur1);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_lib.h"
#include "display_mode_vba.h"
#include "dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parsable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
static void fetch_socbb_params(struct display_mode_lib *mode_lib);
static void fetch_ip_params(struct display_mode_lib *mode_lib);
static void fetch_pipe_params(struct display_mode_lib *mode_lib);
static void recalculate_params(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes);
static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
static void cache_debug_params(struct display_mode_lib *mode_lib);
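/*
 * Descriptive note (added for clarity, not original driver text): the
 * soc/ip/pipe inputs are cached in mode_lib->vba so that the expensive
 * funcs.recalculate() pass is only redone when they actually change;
 * funcs.validate() is always run and the resulting
 * mode_lib->vba.VoltageLevel is returned.
 */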
unsigned int dml_get_voltage_level(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
|| memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
|| num_pipes != mode_lib->vba.cache_num_pipes
|| memcmp(pipes, mode_lib->vba.cache_pipes,
sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
mode_lib->vba.soc = mode_lib->soc;
mode_lib->vba.ip = mode_lib->ip;
memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
mode_lib->vba.cache_num_pipes = num_pipes;
if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
mode_lib->funcs.recalculate(mode_lib);
else {
fetch_socbb_params(mode_lib);
fetch_ip_params(mode_lib);
fetch_pipe_params(mode_lib);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
}
mode_lib->funcs.validate(mode_lib);
cache_debug_params(mode_lib);
return mode_lib->vba.VoltageLevel;
}
#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
{ \
recalculate_params(mode_lib, pipes, num_pipes); \
return var; \
}
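/*
 * Illustrative expansion of the macro above: for example,
 * dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark) defines
 *
 * double get_wm_urgent(struct display_mode_lib *mode_lib,
 * const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
 * {
 * recalculate_params(mode_lib, pipes, num_pipes);
 * return mode_lib->vba.UrgentWatermark;
 * }
 */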
dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFCLKDeepSleep);
dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
dml_get_attr_func(wm_memory_trip, mode_lib->vba.UrgentLatency);
dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
dml_get_attr_func(wm_z8_stutter_exit, mode_lib->vba.Z8StutterExitWatermark);
dml_get_attr_func(wm_z8_stutter_enter_exit, mode_lib->vba.Z8StutterEnterPlusExitWatermark);
dml_get_attr_func(stutter_efficiency_z8, mode_lib->vba.Z8StutterEfficiency);
dml_get_attr_func(stutter_num_bursts_z8, mode_lib->vba.Z8NumberOfStutterBurstsPerFrame);
dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
dml_get_attr_func(stutter_period, mode_lib->vba.StutterPeriod);
dml_get_attr_func(urgent_latency, mode_lib->vba.UrgentLatency);
dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
dml_get_attr_func(dram_clock_change_latency, mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
dml_get_attr_func(fraction_of_urgent_bandwidth, mode_lib->vba.FractionOfUrgentBandwidth);
dml_get_attr_func(fraction_of_urgent_bandwidth_imm_flip, mode_lib->vba.FractionOfUrgentBandwidthImmediateFlip);
dml_get_attr_func(cstate_max_cap_mode, mode_lib->vba.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
dml_get_attr_func(comp_buffer_size_kbytes, mode_lib->vba.CompressedBufferSizeInkByte);
dml_get_attr_func(pixel_chunk_size_in_kbyte, mode_lib->vba.PixelChunkSizeInKByte);
dml_get_attr_func(alpha_pixel_chunk_size_in_kbyte, mode_lib->vba.AlphaPixelChunkSizeInKByte);
dml_get_attr_func(meta_chunk_size_in_kbyte, mode_lib->vba.MetaChunkSize);
dml_get_attr_func(min_pixel_chunk_size_in_byte, mode_lib->vba.MinPixelChunkSizeBytes);
dml_get_attr_func(min_meta_chunk_size_in_byte, mode_lib->vba.MinMetaChunkSizeBytes);
dml_get_attr_func(fclk_watermark, mode_lib->vba.Watermark.FCLKChangeWatermark);
dml_get_attr_func(usr_retraining_watermark, mode_lib->vba.Watermark.USRRetrainingWatermark);
dml_get_attr_func(comp_buffer_reserved_space_kbytes, mode_lib->vba.CompBufReservedSpaceKBytes);
dml_get_attr_func(comp_buffer_reserved_space_64bytes, mode_lib->vba.CompBufReservedSpace64B);
dml_get_attr_func(comp_buffer_reserved_space_zs, mode_lib->vba.CompBufReservedSpaceZs);
dml_get_attr_func(unbounded_request_enabled, mode_lib->vba.UnboundedRequestEnabled);
#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
{\
unsigned int which_plane; \
recalculate_params(mode_lib, pipes, num_pipes); \
which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
return var[which_plane]; \
}
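/*
 * Illustrative expansion of the macro above: for example,
 * dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay) defines
 * get_dsc_delay(), which first remaps the pipe index to its plane index via
 * mode_lib->vba.pipe_plane[which_pipe] and then returns
 * mode_lib->vba.DSCDelay[which_plane].
 */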
dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
dml_get_pipe_attr_func(min_ttu_vblank_in_us, mode_lib->vba.MinTTUVBlank);
dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
dml_get_pipe_attr_func(dst_y_per_row_flip, mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
dml_get_pipe_attr_func(refcyc_per_vm_group_vblank, mode_lib->vba.TimePerVMGroupVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_group_flip, mode_lib->vba.TimePerVMGroupFlip);
dml_get_pipe_attr_func(refcyc_per_vm_req_vblank, mode_lib->vba.TimePerVMRequestVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_req_flip, mode_lib->vba.TimePerVMRequestFlip);
dml_get_pipe_attr_func(refcyc_per_vm_group_vblank_in_us, mode_lib->vba.TimePerVMGroupVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_group_flip_in_us, mode_lib->vba.TimePerVMGroupFlip);
dml_get_pipe_attr_func(refcyc_per_vm_req_vblank_in_us, mode_lib->vba.TimePerVMRequestVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_req_flip_in_us, mode_lib->vba.TimePerVMRequestFlip);
dml_get_pipe_attr_func(refcyc_per_vm_dmdata_in_us, mode_lib->vba.Tdmdl_vm);
dml_get_pipe_attr_func(dmdata_dl_delta_in_us, mode_lib->vba.Tdmdl);
dml_get_pipe_attr_func(refcyc_per_line_delivery_l_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma);
dml_get_pipe_attr_func(refcyc_per_line_delivery_c_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeChroma);
dml_get_pipe_attr_func(refcyc_per_line_delivery_pre_l_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch);
dml_get_pipe_attr_func(refcyc_per_line_delivery_pre_c_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch);
dml_get_pipe_attr_func(refcyc_per_req_delivery_l_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeLuma);
dml_get_pipe_attr_func(refcyc_per_req_delivery_c_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeChroma);
dml_get_pipe_attr_func(refcyc_per_req_delivery_pre_l_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeLumaPrefetch);
dml_get_pipe_attr_func(refcyc_per_req_delivery_pre_c_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeChromaPrefetch);
dml_get_pipe_attr_func(refcyc_per_cursor_req_delivery_in_us, mode_lib->vba.CursorRequestDeliveryTime);
dml_get_pipe_attr_func(refcyc_per_cursor_req_delivery_pre_in_us, mode_lib->vba.CursorRequestDeliveryTimePrefetch);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_nom_l_in_us, mode_lib->vba.TimePerMetaChunkNominal);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_nom_c_in_us, mode_lib->vba.TimePerChromaMetaChunkNominal);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_l_in_us, mode_lib->vba.TimePerMetaChunkVBlank);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
dml_get_pipe_attr_func(vready_at_or_after_vsync, mode_lib->vba.VREADY_AT_OR_AFTER_VSYNC);
dml_get_pipe_attr_func(min_dst_y_next_start, mode_lib->vba.MIN_DST_Y_NEXT_START);
dml_get_pipe_attr_func(dst_y_per_pte_row_nom_l, mode_lib->vba.DST_Y_PER_PTE_ROW_NOM_L);
dml_get_pipe_attr_func(dst_y_per_pte_row_nom_c, mode_lib->vba.DST_Y_PER_PTE_ROW_NOM_C);
dml_get_pipe_attr_func(dst_y_per_meta_row_nom_l, mode_lib->vba.DST_Y_PER_META_ROW_NOM_L);
dml_get_pipe_attr_func(dst_y_per_meta_row_nom_c, mode_lib->vba.DST_Y_PER_META_ROW_NOM_C);
dml_get_pipe_attr_func(refcyc_per_pte_group_nom_l_in_us, mode_lib->vba.time_per_pte_group_nom_luma);
dml_get_pipe_attr_func(refcyc_per_pte_group_nom_c_in_us, mode_lib->vba.time_per_pte_group_nom_chroma);
dml_get_pipe_attr_func(refcyc_per_pte_group_vblank_l_in_us, mode_lib->vba.time_per_pte_group_vblank_luma);
dml_get_pipe_attr_func(refcyc_per_pte_group_vblank_c_in_us, mode_lib->vba.time_per_pte_group_vblank_chroma);
dml_get_pipe_attr_func(refcyc_per_pte_group_flip_l_in_us, mode_lib->vba.time_per_pte_group_flip_luma);
dml_get_pipe_attr_func(refcyc_per_pte_group_flip_c_in_us, mode_lib->vba.time_per_pte_group_flip_chroma);
dml_get_pipe_attr_func(vstartup_calculated, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(dpte_row_height_linear_c, mode_lib->vba.dpte_row_height_linear_chroma);
dml_get_pipe_attr_func(swath_height_l, mode_lib->vba.SwathHeightY);
dml_get_pipe_attr_func(swath_height_c, mode_lib->vba.SwathHeightC);
dml_get_pipe_attr_func(det_stored_buffer_size_l_bytes, mode_lib->vba.DETBufferSizeY);
dml_get_pipe_attr_func(det_stored_buffer_size_c_bytes, mode_lib->vba.DETBufferSizeC);
dml_get_pipe_attr_func(dpte_group_size_in_bytes, mode_lib->vba.dpte_group_bytes);
dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes);
dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear);
dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE);
dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL);
dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL);
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
recalculate_params(mode_lib, pipes, num_pipes);
return mode_lib->vba.TotImmediateFlipBytes;
}
double get_total_immediate_flip_bw(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
unsigned int k;
double immediate_flip_bw = 0.0;
recalculate_params(mode_lib, pipes, num_pipes);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
immediate_flip_bw += mode_lib->vba.ImmediateFlipBW[k];
return immediate_flip_bw;
}
double get_total_prefetch_bw(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
unsigned int k;
double total_prefetch_bw = 0.0;
recalculate_params(mode_lib, pipes, num_pipes);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
return total_prefetch_bw;
}
unsigned int get_total_surface_size_in_mall_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
unsigned int k;
	unsigned int size = 0;
recalculate_params(mode_lib, pipes, num_pipes);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
size += mode_lib->vba.SurfaceSizeInMALL[k];
return size;
}
static unsigned int get_pipe_idx(struct display_mode_lib *mode_lib, unsigned int plane_idx)
{
int pipe_idx = -1;
int i;
ASSERT(plane_idx < DC__NUM_DPP__MAX);
for (i = 0; i < DC__NUM_DPP__MAX ; i++) {
if (plane_idx == mode_lib->vba.pipe_plane[i]) {
pipe_idx = i;
break;
}
}
ASSERT(pipe_idx >= 0);
return pipe_idx;
}
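/*
 * Illustrative example (editor's note, not from the original source): with a
 * single plane split across two pipes, fetch_pipe_params() below sets
 * pipe_plane[0] = 0 and pipe_plane[1] = 0, so get_pipe_idx(mode_lib, 0)
 * returns 0, i.e. the first pipe that backs plane 0.
 */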
double get_det_buffer_size_kbytes(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes, unsigned int pipe_idx)
{
unsigned int plane_idx;
double det_buf_size_kbytes;
recalculate_params(mode_lib, pipes, num_pipes);
plane_idx = mode_lib->vba.pipe_plane[pipe_idx];
dml_print("DML::%s: num_pipes=%d pipe_idx=%d plane_idx=%0d\n", __func__, num_pipes, pipe_idx, plane_idx);
det_buf_size_kbytes = mode_lib->vba.DETBufferSizeInKByte[plane_idx]; // per hubp DET buffer size
dml_print("DML::%s: det_buf_size_kbytes=%3.2f\n", __func__, det_buf_size_kbytes);
return det_buf_size_kbytes;
}
bool get_is_phantom_pipe(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes, unsigned int pipe_idx)
{
unsigned int plane_idx;
recalculate_params(mode_lib, pipes, num_pipes);
plane_idx = mode_lib->vba.pipe_plane[pipe_idx];
dml_print("DML::%s: num_pipes=%d pipe_idx=%d UseMALLForPStateChange=%0d\n", __func__, num_pipes, pipe_idx,
mode_lib->vba.UsesMALLForPStateChange[plane_idx]);
return (mode_lib->vba.UsesMALLForPStateChange[plane_idx] == dm_use_mall_pstate_change_phantom_pipe);
}
static void fetch_socbb_params(struct display_mode_lib *mode_lib)
{
soc_bounding_box_st *soc = &mode_lib->vba.soc;
int i;
// SOC Bounding Box Parameters
mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
mode_lib->vba.NumberOfChannels = soc->num_chans;
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly =
soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // there's always that one bastard variable that's so long it throws everything out of alignment!
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData =
soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly =
soc->pct_ideal_dram_sdp_bw_after_urgent_vm_only;
mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation =
soc->max_avg_sdp_bw_use_normal_percent;
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation =
soc->max_avg_dram_bw_use_normal_percent;
mode_lib->vba.UrgentLatencyPixelDataOnly = soc->urgent_latency_pixel_data_only_us;
mode_lib->vba.UrgentLatencyPixelMixedWithVMData = soc->urgent_latency_pixel_mixed_with_vm_data_us;
mode_lib->vba.UrgentLatencyVMDataOnly = soc->urgent_latency_vm_data_only_us;
mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly =
soc->urgent_out_of_order_return_per_channel_pixel_only_bytes;
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData =
soc->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly =
soc->urgent_out_of_order_return_per_channel_vm_only_bytes;
mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
mode_lib->vba.PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency = soc->pct_ideal_sdp_bw_after_urgent;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData = soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly = soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_only;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly = soc->pct_ideal_dram_sdp_bw_after_urgent_vm_only;
mode_lib->vba.MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation =
soc->max_avg_sdp_bw_use_normal_percent;
mode_lib->vba.SRExitZ8Time = soc->sr_exit_z8_time_us;
mode_lib->vba.SREnterPlusExitZ8Time = soc->sr_enter_plus_exit_z8_time_us;
mode_lib->vba.FCLKChangeLatency = soc->fclk_change_latency_us;
mode_lib->vba.USRRetrainingLatency = soc->usr_retraining_latency_us;
mode_lib->vba.SMNLatency = soc->smn_latency_us;
mode_lib->vba.MALLAllocatedForDCNFinal = soc->mall_allocated_for_dcn_mbytes;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencySTROBE = soc->pct_ideal_dram_bw_after_urgent_strobe;
mode_lib->vba.MaxAveragePercentOfIdealFabricBWDisplayCanUseInNormalSystemOperation =
soc->max_avg_fabric_bw_use_normal_percent;
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperationSTROBE =
soc->max_avg_dram_bw_use_normal_strobe_percent;
mode_lib->vba.DRAMClockChangeRequirementFinal = soc->dram_clock_change_requirement_final;
mode_lib->vba.FCLKChangeRequirementFinal = 1;
mode_lib->vba.USRRetrainingRequiredFinal = 1;
mode_lib->vba.AllowForPStateChangeOrStutterInVBlankFinal = soc->allow_for_pstate_or_stutter_in_vblank_final;
mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank =
soc->allow_dram_self_refresh_or_dram_clock_change_in_vblank;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;
mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024;
// Set the voltage scaling clocks as the defaults. Most of these will
// be set to different values by the test
for (i = 0; i < mode_lib->vba.soc.num_states; i++)
if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
break;
mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mts;
mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
mode_lib->vba.UseUrgentBurstBandwidth = soc->use_urgent_burst_bw;
mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
mode_lib->vba.MaxHSCLRatio = 4;
mode_lib->vba.MaxVSCLRatio = 4;
mode_lib->vba.Cursor64BppSupport = true;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
mode_lib->vba.PHYCLKD18PerState[i] = soc->clock_limits[i].phyclk_d18_mhz;
mode_lib->vba.PHYCLKD32PerState[i] = soc->clock_limits[i].phyclk_d32_mhz;
mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;
//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
mode_lib->vba.UrgentLatencyAdjustmentFabricClockComponent =
soc->urgent_latency_adjustment_fabric_clock_component_us;
mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference =
soc->urgent_latency_adjustment_fabric_clock_reference_mhz;
mode_lib->vba.MaxVRatioPre = soc->max_vratio_pre;
}
static void fetch_ip_params(struct display_mode_lib *mode_lib)
{
ip_params_st *ip = &mode_lib->vba.ip;
// IP Parameters
mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
mode_lib->vba.ClampMinDCFCLK = ip->clamp_min_dcfclk;
mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
mode_lib->vba.MaxNumOTG = ip->max_num_otg;
mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
mode_lib->vba.MaxNumWriteback = ip->max_num_wb;
mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
mode_lib->vba.DETBufferSizeInKByte[0] = ip->det_buffer_size_kbytes;
mode_lib->vba.ConfigReturnBufferSizeInKByte = ip->config_return_buffer_size_in_kbytes;
mode_lib->vba.CompressedBufferSegmentSizeInkByte = ip->compressed_buffer_segment_size_in_kbytes;
mode_lib->vba.MetaFIFOSizeInKEntries = ip->meta_fifo_size_in_kentries;
mode_lib->vba.ZeroSizeBufferEntries = ip->zero_size_buffer_entries;
mode_lib->vba.COMPBUF_RESERVED_SPACE_64B = ip->compbuf_reserved_space_64b;
mode_lib->vba.COMPBUF_RESERVED_SPACE_ZS = ip->compbuf_reserved_space_zs;
mode_lib->vba.MaximumDSCBitsPerComponent = ip->maximum_dsc_bits_per_component;
mode_lib->vba.DSC422NativeSupport = ip->dsc422_native_support;
/* In DCN3.2, nomDETInKByte should be initialized correctly. */
mode_lib->vba.nomDETInKByte = ip->det_buffer_size_kbytes;
mode_lib->vba.CompbufReservedSpace64B = ip->compbuf_reserved_space_64b;
mode_lib->vba.CompbufReservedSpaceZs = ip->compbuf_reserved_space_zs;
mode_lib->vba.CompressedBufferSegmentSizeInkByteFinal = ip->compressed_buffer_segment_size_in_kbytes;
mode_lib->vba.LineBufferSizeFinal = ip->line_buffer_size_bits;
	mode_lib->vba.AlphaPixelChunkSizeInKByte = ip->alpha_pixel_chunk_size_kbytes; // not used
mode_lib->vba.MinPixelChunkSizeBytes = ip->min_pixel_chunk_size_bytes; // not used
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit = ip->maximum_pixels_per_line_per_dsc_unit;
mode_lib->vba.MaxNumDP2p0Outputs = ip->max_num_dp2p0_outputs;
mode_lib->vba.MaxNumDP2p0Streams = ip->max_num_dp2p0_streams;
mode_lib->vba.DCCMetaBufferSizeBytes = ip->dcc_meta_buffer_size_bytes;
mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
mode_lib->vba.MinMetaChunkSizeBytes = ip->min_meta_chunk_size_bytes;
mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
mode_lib->vba.PTEBufferSizeInRequestsLuma = ip->dpte_buffer_size_in_pte_reqs_luma;
mode_lib->vba.PTEBufferSizeInRequestsChroma = ip->dpte_buffer_size_in_pte_reqs_chroma;
mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
mode_lib->vba.MaxHSCLRatio = ip->max_hscl_ratio;
mode_lib->vba.MaxVSCLRatio = ip->max_vscl_ratio;
mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes * 1024;
mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes * 1024;
mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;
mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size;
mode_lib->vba.WritebackChromaLineBufferWidth =
ip->writeback_chroma_line_buffer_width_pixels;
mode_lib->vba.WritebackLineBufferLumaBufferSize =
ip->writeback_line_buffer_luma_buffer_size;
mode_lib->vba.WritebackLineBufferChromaBufferSize =
ip->writeback_line_buffer_chroma_buffer_size;
mode_lib->vba.Writeback10bpc420Supported = ip->writeback_10bpc420_supported;
mode_lib->vba.WritebackMaxHSCLRatio = ip->writeback_max_hscl_ratio;
mode_lib->vba.WritebackMaxVSCLRatio = ip->writeback_max_vscl_ratio;
mode_lib->vba.WritebackMinHSCLRatio = ip->writeback_min_hscl_ratio;
mode_lib->vba.WritebackMinVSCLRatio = ip->writeback_min_vscl_ratio;
mode_lib->vba.WritebackMaxHSCLTaps = ip->writeback_max_hscl_taps;
mode_lib->vba.WritebackMaxVSCLTaps = ip->writeback_max_vscl_taps;
mode_lib->vba.WritebackConfiguration = dm_normal;
mode_lib->vba.GPUVMMaxPageTableLevels = ip->gpuvm_max_page_table_levels;
mode_lib->vba.HostVMMaxNonCachedPageTableLevels = ip->hostvm_max_page_table_levels;
mode_lib->vba.HostVMMaxPageTableLevels = ip->hostvm_max_page_table_levels;
mode_lib->vba.HostVMCachedPageTableLevels = ip->hostvm_cached_page_table_levels;
mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
mode_lib->vba.NumberOfDSC = ip->num_dsc;
mode_lib->vba.ODMCapability = ip->odm_capable;
mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
mode_lib->vba.XFCSupported = ip->xfc_supported;
mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
mode_lib->vba.DynamicMetadataVMEnabled = ip->dynamic_metadata_vm_enabled;
mode_lib->vba.ODMCombine4To1Supported = ip->odm_combine_4to1_supported;
mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
mode_lib->vba.PTEGroupSize = ip->pte_group_size_bytes;
mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = ip->gfx7_compat_tiling_supported;
}
static void fetch_pipe_params(struct display_mode_lib *mode_lib)
{
display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
ip_params_st *ip = &mode_lib->vba.ip;
unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
unsigned int j, k;
bool PlaneVisited[DC__NUM_DPP__MAX];
bool visited[DC__NUM_DPP__MAX];
// Convert Pipes to Planes
for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
visited[k] = false;
mode_lib->vba.NumberOfActivePlanes = 0;
mode_lib->vba.NumberOfActiveSurfaces = 0;
mode_lib->vba.ImmediateFlipSupport = false;
for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
display_pipe_source_params_st *src = &pipes[j].pipe.src;
display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
display_output_params_st *dout = &pipes[j].dout;
display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
if (visited[j])
continue;
visited[j] = true;
mode_lib->vba.ImmediateFlipRequirement[j] = dm_immediate_flip_not_required;
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_width;
mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_width_c;
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_height;
mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_height_c;
mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_y;
mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_c;
mode_lib->vba.SourceRotation[mode_lib->vba.NumberOfActiveSurfaces] = src->source_rotation;
mode_lib->vba.ViewportXStartY[mode_lib->vba.NumberOfActiveSurfaces] = src->viewport_x_y;
mode_lib->vba.ViewportXStartC[mode_lib->vba.NumberOfActiveSurfaces] = src->viewport_x_c;
// TODO: Assign correct value to viewport_stationary
mode_lib->vba.ViewportStationary[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_stationary;
mode_lib->vba.UsesMALLForPStateChange[mode_lib->vba.NumberOfActivePlanes] = src->use_mall_for_pstate_change;
mode_lib->vba.UseMALLForStaticScreen[mode_lib->vba.NumberOfActivePlanes] = src->use_mall_for_static_screen;
mode_lib->vba.GPUVMMinPageSizeKBytes[mode_lib->vba.NumberOfActivePlanes] = src->gpuvm_min_page_size_kbytes;
mode_lib->vba.RefreshRate[mode_lib->vba.NumberOfActivePlanes] = dst->refresh_rate; //todo remove this
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
if (src->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
else
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
//TODO: Need to assign correct values to dp_multistream vars
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
mode_lib->vba.HRatioChroma[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio_c;
mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
mode_lib->vba.VRatioChroma[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio_c;
mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
if (dst->interlaced && !ip->ptoi_supported) {
mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
mode_lib->vba.VRatioChroma[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
}
mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
mode_lib->vba.VBlankNom[mode_lib->vba.NumberOfActivePlanes] = dst->vblank_nom;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
src->dcc_use_global ?
ip->dcc_supported : src->dcc && ip->dcc_supported;
mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] = (enum source_format_class) (src->source_format);
mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
(enum dm_swizzle_mode) (src->sw_mode);
mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] =
dout->is_virtual;
if (dout->dsc_enable)
mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
else
mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = 0.0;
mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
dout->dp_lanes;
/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */
mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] =
dout->max_audio_sample_rate;
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_slices;
if (!dout->dsc_input_bpc) {
mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
ip->maximum_dsc_bits_per_component;
} else {
mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_input_bpc;
}
mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
mode_lib->vba.ActiveWritebacksPerPlane[mode_lib->vba.NumberOfActivePlanes] =
dout->num_active_wb;
mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_src_height;
mode_lib->vba.WritebackSourceWidth[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_src_width;
mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_dst_width;
mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_dst_height;
mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_hratio;
mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_vratio;
mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum source_format_class) (dout->wb.wb_pixel_format);
mode_lib->vba.WritebackHTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_htaps_luma;
mode_lib->vba.WritebackVTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_vtaps_luma;
mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_htaps_luma;
mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_vtaps_luma;
mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_htaps_chroma;
mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_vtaps_chroma;
mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_hratio;
mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
dout->wb.wb_vratio;
mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
src->dynamic_metadata_enable;
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
src->dynamic_metadata_lines_before_active;
mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
src->dynamic_metadata_xmit_bytes;
mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
&& ip->xfc_supported;
mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
mode_lib->vba.PixelClockBackEnd[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
mode_lib->vba.DRRDisplay[mode_lib->vba.NumberOfActiveSurfaces] = dst->drr_display;
if (ip->is_line_buffer_bpp_fixed)
mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
ip->line_buffer_fixed_bpp;
else {
unsigned int lb_depth;
switch (scl->lb_depth) {
case dm_lb_6:
lb_depth = 18;
break;
case dm_lb_8:
lb_depth = 24;
break;
case dm_lb_10:
lb_depth = 30;
break;
case dm_lb_12:
lb_depth = 36;
break;
case dm_lb_16:
lb_depth = 48;
break;
case dm_lb_19:
lb_depth = 57;
break;
default:
lb_depth = 36;
}
mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
}
mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
// The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
// calculate things a little more accurately
for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
switch (k) {
case 0:
mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
CursorBppEnumToBits(
(enum cursor_bpp) (src->cur0_bpp));
mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
src->cur0_src_width;
if (src->cur0_src_width > 0)
mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
break;
case 1:
mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
CursorBppEnumToBits(
(enum cursor_bpp) (src->cur1_bpp));
mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
src->cur1_src_width;
if (src->cur1_src_width > 0)
mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
break;
default:
				dml_print("ERROR: Number of cursors specified exceeds supported maximum\n");
}
}
OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
if (j == 0)
mode_lib->vba.UseMaximumVStartup = dst->use_maximum_vstartup;
else
mode_lib->vba.UseMaximumVStartup = mode_lib->vba.UseMaximumVStartup
|| dst->use_maximum_vstartup;
if (dst->odm_combine && !src->is_hsplit)
dml_print(
"ERROR: ODM Combine is specified but is_hsplit has not be specified for pipe %i\n",
j);
if (src->is_hsplit) {
for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest;
if (src_k->is_hsplit && !visited[k]
&& src->hsplit_grp == src_k->hsplit_grp) {
mode_lib->vba.pipe_plane[k] =
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
if (src_k->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width;
mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width_c;
mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
dst_k->recout_width;
} else {
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height;
mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height_c;
}
visited[k] = true;
}
}
}
if (src->viewport_width_max) {
int hdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_422_10 ? 2 : 1;
int vdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_420_12 ? 2 : 1;
if (mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max)
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max;
if (mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max)
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max;
if (mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max / hdiv_c)
mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max / hdiv_c;
if (mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max / vdiv_c)
mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max / vdiv_c;
}
if (pipes[j].pipe.src.immediate_flip) {
mode_lib->vba.ImmediateFlipSupport = true;
mode_lib->vba.ImmediateFlipRequirement[j] = dm_immediate_flip_required;
}
mode_lib->vba.NumberOfActivePlanes++;
mode_lib->vba.NumberOfActiveSurfaces++;
}
// handle overlays through BlendingAndTiming
// BlendingAndTiming tells you which instance to look at to get timing, the so called 'master'
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
PlaneVisited[j] = false;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
// doesn't matter, so choose the smaller one
mode_lib->vba.BlendingAndTiming[j] = j;
PlaneVisited[j] = true;
mode_lib->vba.BlendingAndTiming[k] = j;
PlaneVisited[k] = true;
}
}
if (!PlaneVisited[j]) {
mode_lib->vba.BlendingAndTiming[j] = j;
PlaneVisited[j] = true;
}
}
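	/*
	 * Illustrative example (editor's note, not from the original source):
	 * if planes 0 and 2 drive the same OTG instance while plane 1 uses a
	 * different one, the loops above yield BlendingAndTiming = {0, 1, 0};
	 * plane 0 is the timing 'master' that plane 2 refers to.
	 */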
mode_lib->vba.SynchronizeTimingsFinal = pipes[0].pipe.dest.synchronize_timings;
mode_lib->vba.DCCProgrammingAssumesScanDirectionUnknownFinal = false;
mode_lib->vba.DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment = 0;
mode_lib->vba.UseUnboundedRequesting = dm_unbounded_requesting;
for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
if (pipes[k].pipe.src.unbounded_req_mode == 0)
mode_lib->vba.UseUnboundedRequesting = dm_unbounded_requesting_disable;
}
// TODO: ODMCombineEnabled => 2 * DPPPerPlane...actually maybe not since all pipes are specified
// Do we want the dscclk to automatically be halved? Guess not since the value is specified
mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k) {
ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
}
mode_lib->vba.GPUVMEnable = false;
mode_lib->vba.HostVMEnable = false;
mode_lib->vba.OverrideGPUVMPageTableLevels = 0;
mode_lib->vba.OverrideHostVMPageTableLevels = 0;
for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
mode_lib->vba.GPUVMEnable = mode_lib->vba.GPUVMEnable || !!pipes[k].pipe.src.gpuvm || !!pipes[k].pipe.src.vm;
mode_lib->vba.OverrideGPUVMPageTableLevels =
(pipes[k].pipe.src.gpuvm_levels_force_en
&& mode_lib->vba.OverrideGPUVMPageTableLevels
< pipes[k].pipe.src.gpuvm_levels_force) ?
pipes[k].pipe.src.gpuvm_levels_force :
mode_lib->vba.OverrideGPUVMPageTableLevels;
mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable || !!pipes[k].pipe.src.hostvm || !!pipes[k].pipe.src.vm;
mode_lib->vba.OverrideHostVMPageTableLevels =
(pipes[k].pipe.src.hostvm_levels_force_en
&& mode_lib->vba.OverrideHostVMPageTableLevels
< pipes[k].pipe.src.hostvm_levels_force) ?
pipes[k].pipe.src.hostvm_levels_force :
mode_lib->vba.OverrideHostVMPageTableLevels;
}
if (mode_lib->vba.OverrideGPUVMPageTableLevels)
mode_lib->vba.GPUVMMaxPageTableLevels = mode_lib->vba.OverrideGPUVMPageTableLevels;
if (mode_lib->vba.OverrideHostVMPageTableLevels)
mode_lib->vba.HostVMMaxPageTableLevels = mode_lib->vba.OverrideHostVMPageTableLevels;
mode_lib->vba.GPUVMEnable = mode_lib->vba.GPUVMEnable && !!ip->gpuvm_enable;
mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable && !!ip->hostvm_enable;
for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
mode_lib->vba.ForceOneRowForFrame[k] = pipes[k].pipe.src.force_one_row_for_frame;
mode_lib->vba.PteBufferMode[k] = pipes[k].pipe.src.pte_buffer_mode;
if (mode_lib->vba.PteBufferMode[k] == 0 && mode_lib->vba.GPUVMEnable) {
if (mode_lib->vba.ForceOneRowForFrame[k] ||
(mode_lib->vba.GPUVMMinPageSizeKBytes[k] > 64*1024) ||
(mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_disable) ||
(mode_lib->vba.UseMALLForStaticScreen[k] != dm_use_mall_static_screen_disable)) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ERROR: Invalid PteBufferMode=%d for plane %0d!\n",
__func__, mode_lib->vba.PteBufferMode[k], k);
dml_print("DML::%s: - ForceOneRowForFrame = %d\n",
__func__, mode_lib->vba.ForceOneRowForFrame[k]);
dml_print("DML::%s: - GPUVMMinPageSizeKBytes = %d\n",
__func__, mode_lib->vba.GPUVMMinPageSizeKBytes[k]);
dml_print("DML::%s: - UseMALLForPStateChange = %d\n",
__func__, (int) mode_lib->vba.UsesMALLForPStateChange[k]);
dml_print("DML::%s: - UseMALLForStaticScreen = %d\n",
__func__, (int) mode_lib->vba.UseMALLForStaticScreen[k]);
#endif
ASSERT(0);
}
}
}
}
/**
* cache_debug_params: Cache any params that needed to be maintained from the initial validation
* for debug purposes.
*
* The DML getters can modify some of the VBA params that we are interested in (for example when
* calculating with dummy p-state latency), so cache any params here that we want for debugging
*
* @mode_lib: mode_lib input/output of validate call
*
* Return: void
*
*/
static void cache_debug_params(struct display_mode_lib *mode_lib)
{
int k = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
mode_lib->vba.CachedActiveDRAMClockChangeLatencyMargin[k] = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
// rather than working them out as in recalculate_ms
static void recalculate_params(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes)
{
	// This is only safe to use memcmp because there are no non-POD types in struct display_mode_lib
if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
|| memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
|| num_pipes != mode_lib->vba.cache_num_pipes
|| memcmp(
pipes,
mode_lib->vba.cache_pipes,
sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
mode_lib->vba.soc = mode_lib->soc;
mode_lib->vba.ip = mode_lib->ip;
memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
mode_lib->vba.cache_num_pipes = num_pipes;
mode_lib->funcs.recalculate(mode_lib);
}
}
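/*
 * Usage sketch (editor's note, illustrative and not part of the original
 * source): because the getters above funnel through recalculate_params(),
 * back-to-back queries with unchanged soc/ip/pipe inputs only pay for the
 * memcmp, e.g.:
 *
 *	double flip_bw = get_total_immediate_flip_bw(mode_lib, pipes, num_pipes);
 *	double pref_bw = get_total_prefetch_bw(mode_lib, pipes, num_pipes);
 *
 * Only the first call (or the first after an input change) runs
 * mode_lib->funcs.recalculate().
 */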
void Calculate256BBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC)
{
if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
|| SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8)) {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
} else if (SourcePixelFormat == dm_444_64) {
*BlockHeight256BytesY = 4;
} else if (SourcePixelFormat == dm_444_8) {
*BlockHeight256BytesY = 16;
} else {
*BlockHeight256BytesY = 8;
}
*BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
*BlockHeight256BytesC = 0;
*BlockWidth256BytesC = 0;
} else {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
*BlockHeight256BytesC = 1;
} else if (SourcePixelFormat == dm_420_8) {
*BlockHeight256BytesY = 16;
*BlockHeight256BytesC = 8;
} else {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 8;
}
*BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
}
}
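/*
 * Worked example (editor's note, illustrative): for dm_444_32 on a swizzled
 * surface, BytePerPixelY = 4, so the code above picks an 8-high block and a
 * width of 256 / 4 / 8 = 8, i.e. an 8x8-element tile holding exactly
 * 8 * 8 * 4 = 256 bytes.
 */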
bool CalculateMinAndMaxPrefetchMode(
enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
unsigned int *MinPrefetchMode,
unsigned int *MaxPrefetchMode)
{
if (AllowDRAMSelfRefreshOrDRAMClockChangeInVblank
== dm_neither_self_refresh_nor_mclk_switch) {
*MinPrefetchMode = 2;
*MaxPrefetchMode = 2;
return false;
} else if (AllowDRAMSelfRefreshOrDRAMClockChangeInVblank == dm_allow_self_refresh) {
*MinPrefetchMode = 1;
*MaxPrefetchMode = 1;
return false;
} else if (AllowDRAMSelfRefreshOrDRAMClockChangeInVblank
== dm_allow_self_refresh_and_mclk_switch) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
return false;
} else if (AllowDRAMSelfRefreshOrDRAMClockChangeInVblank
== dm_try_to_allow_self_refresh_and_mclk_switch) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 2;
return false;
}
*MinPrefetchMode = 0;
*MaxPrefetchMode = 2;
return true;
}
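/*
 * Illustrative summary (editor's note, inferred from the mapping above):
 * prefetch mode 0 allows both DRAM self refresh and an mclk switch during
 * prefetch, mode 1 allows self refresh only, and mode 2 allows neither; the
 * 'try to allow' affinity therefore opens the full 0..2 range, and the true
 * return value flags an unrecognized affinity.
 */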
void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
{
unsigned int k;
//Progressive To Interlace Unit Effect
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
if (mode_lib->vba.Interlace[k] == 1
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
}
}
}
static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
{
switch (ebpp) {
case dm_cur_2bit:
return 2;
case dm_cur_32bit:
return 32;
case dm_cur_64bit:
return 64;
default:
return 0;
}
}
void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
{
soc_bounding_box_st *soc = &mode_lib->vba.soc;
unsigned int k;
unsigned int total_pipes = 0;
unsigned int pipe_idx = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
if (mode_lib->vba.ReturnBW == 0)
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
fetch_socbb_params(mode_lib);
fetch_ip_params(mode_lib);
fetch_pipe_params(mode_lib);
mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
else
mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
// Total Available Pipes Support Check
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
total_pipes += mode_lib->vba.DPPPerPlane[k];
pipe_idx = get_pipe_idx(mode_lib, k);
if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
else
mode_lib->vba.DPPCLK[k] = soc->clock_limits[mode_lib->vba.VoltageLevel].dppclk_mhz;
}
ASSERT(total_pipes <= DC__NUM_DPP__MAX);
}
double CalculateWriteBackDISPCLK(
enum source_format_class WritebackPixelFormat,
double PixelClock,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
double WritebackDestinationWidth,
unsigned int HTotal,
unsigned int WritebackChromaLineBufferWidth)
{
double CalculateWriteBackDISPCLK = 1.01 * PixelClock * dml_max(
dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
dml_max((WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1) * dml_ceil(WritebackDestinationWidth / 4.0, 1)
+ dml_ceil(WritebackDestinationWidth / 4.0, 1)) / (double) HTotal + dml_ceil(1.0 / WritebackVRatio, 1)
* (dml_ceil(WritebackLumaVTaps / 4.0, 1) + 4.0) / (double) HTotal,
dml_ceil(1.0 / WritebackVRatio, 1) * WritebackDestinationWidth / (double) HTotal));
if (WritebackPixelFormat != dm_444_32) {
CalculateWriteBackDISPCLK = dml_max(CalculateWriteBackDISPCLK, 1.01 * PixelClock * dml_max(
dml_ceil(WritebackChromaHTaps / 2.0, 1) / (2 * WritebackHRatio),
dml_max((WritebackChromaVTaps * dml_ceil(1 / (2 * WritebackVRatio), 1) * dml_ceil(WritebackDestinationWidth / 2.0 / 2.0, 1)
+ dml_ceil(WritebackDestinationWidth / 2.0 / WritebackChromaLineBufferWidth, 1)) / HTotal
+ dml_ceil(1 / (2 * WritebackVRatio), 1) * (dml_ceil(WritebackChromaVTaps / 4.0, 1) + 4) / HTotal,
dml_ceil(1.0 / (2 * WritebackVRatio), 1) * WritebackDestinationWidth / 2.0 / HTotal)));
}
return CalculateWriteBackDISPCLK;
}
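/*
 * Worked example (editor's note, illustrative with assumed numbers): a
 * dm_444_32 writeback with 1:1 scaling, 4 luma taps in each direction,
 * destination width 1920 and HTotal 2200 makes the luma terms above roughly
 * max(1.0, (2400 + 5) / 2200, 1920 / 2200) ~= 1.09, so the required DISPCLK
 * is about 1.01 * 1.09 ~= 1.10x the pixel clock; the chroma branch is
 * skipped for dm_444_32.
 */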
| linux-master | drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dml1_display_rq_dlg_calc.h"
#include "display_mode_lib.h"
#include "dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
if (source_format == dm_444_16) {
if (!is_chroma)
ret_val = 2;
} else if (source_format == dm_444_32) {
if (!is_chroma)
ret_val = 4;
} else if (source_format == dm_444_64) {
if (!is_chroma)
ret_val = 8;
} else if (source_format == dm_420_8) {
if (is_chroma)
ret_val = 2;
else
ret_val = 1;
} else if (source_format == dm_420_10) {
if (is_chroma)
ret_val = 4;
else
ret_val = 2;
}
return ret_val;
}
static bool is_dual_plane(enum source_format_class source_format)
{
	bool ret_val = false;
	if ((source_format == dm_420_8) || (source_format == dm_420_10))
		ret_val = true;
return ret_val;
}
static void get_blk256_size(
unsigned int *blk256_width,
unsigned int *blk256_height,
unsigned int bytes_per_element)
{
if (bytes_per_element == 1) {
*blk256_width = 16;
*blk256_height = 16;
} else if (bytes_per_element == 2) {
*blk256_width = 16;
*blk256_height = 8;
} else if (bytes_per_element == 4) {
*blk256_width = 8;
*blk256_height = 8;
} else if (bytes_per_element == 8) {
*blk256_width = 8;
*blk256_height = 4;
}
}
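/*
 * Illustrative note (editor's note, not from the original source): each
 * pairing above keeps blk256_width * blk256_height * bytes_per_element = 256,
 * e.g. 4 bytes per element gives an 8x8 block: 8 * 8 * 4 = 256 bytes.
 */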
static double get_refcyc_per_delivery(
struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
unsigned int recout_width,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
/ (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
DTRACE("DLG: %s: recout_width = %d", __func__, recout_width);
DTRACE("DLG: %s: vratio = %3.2f", __func__, vratio);
DTRACE("DLG: %s: req_per_swath_ub = %d", __func__, req_per_swath_ub);
DTRACE("DLG: %s: refcyc_per_delivery= %3.2f", __func__, refcyc_per_delivery);
return refcyc_per_delivery;
}
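/*
 * Worked example (editor's note, illustrative with assumed numbers):
 * refclk = 400 MHz, pclk = 594 MHz, recout_width = 1920, vratio <= 1 and
 * req_per_swath_ub = 120 give 400 * 1920 / 594 / 120 ~= 10.8 refclk cycles
 * between consecutive requests of a swath.
 */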
static double get_vratio_pre(
struct display_mode_lib *mode_lib,
unsigned int max_num_sw,
unsigned int max_partial_sw,
unsigned int swath_height,
double vinit,
double l_sw)
{
double prefill = dml_floor(vinit, 1);
double vratio_pre = 1.0;
vratio_pre = (max_num_sw * swath_height + max_partial_sw) / l_sw;
if (swath_height > 4) {
double tmp0 = (max_num_sw * swath_height) / (l_sw - (prefill - 3.0) / 2.0);
if (tmp0 > vratio_pre)
vratio_pre = tmp0;
}
DTRACE("DLG: %s: max_num_sw = %0d", __func__, max_num_sw);
DTRACE("DLG: %s: max_partial_sw = %0d", __func__, max_partial_sw);
DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
DTRACE("DLG: %s: vratio_pre = %3.2f", __func__, vratio_pre);
if (vratio_pre < 1.0) {
DTRACE("WARNING_DLG: %s: vratio_pre=%3.2f < 1.0, set to 1.0", __func__, vratio_pre);
vratio_pre = 1.0;
}
if (vratio_pre > 4.0) {
DTRACE(
"WARNING_DLG: %s: vratio_pre=%3.2f > 4.0 (max scaling ratio). set to 4.0",
__func__,
vratio_pre);
vratio_pre = 4.0;
}
return vratio_pre;
}
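/*
 * Worked example (editor's note, illustrative with assumed numbers):
 * max_num_sw = 3, max_partial_sw = 2, swath_height = 4 and l_sw = 10 give
 * vratio_pre = (3 * 4 + 2) / 10 = 1.4; swath_height is not > 4 so the
 * prefill-based term is skipped, and 1.4 already lies inside the [1.0, 4.0]
 * clamp.
 */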
static void get_swath_need(
struct display_mode_lib *mode_lib,
unsigned int *max_num_sw,
unsigned int *max_partial_sw,
unsigned int swath_height,
double vinit)
{
double prefill = dml_floor(vinit, 1);
unsigned int max_partial_sw_int;
DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
ASSERT(prefill > 0.0 && prefill <= 8.0);
*max_num_sw = (unsigned int) (dml_ceil((prefill - 1.0) / (double) swath_height, 1) + 1.0); /* prefill has to be >= 1 */
max_partial_sw_int =
(prefill == 1) ?
(swath_height - 1) :
((unsigned int) (prefill - 2.0) % swath_height);
*max_partial_sw = (max_partial_sw_int < 1) ? 1 : max_partial_sw_int; /* ensure minimum of 1 is used */
DTRACE("DLG: %s: max_num_sw = %0d", __func__, *max_num_sw);
DTRACE("DLG: %s: max_partial_sw = %0d", __func__, *max_partial_sw);
}
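/*
 * Worked example (editor's note, illustrative): vinit = 2.5 and
 * swath_height = 4 give prefill = 2, so max_num_sw = ceil((2 - 1) / 4) + 1 = 2
 * and the partial swath term (2 - 2) % 4 = 0 is clamped up to
 * max_partial_sw = 1.
 */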
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
DTRACE("DLG: %s: rq_sizing param", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
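/*
 * Worked example (editor's note, illustrative with assumed sizes):
 * chunk_bytes = 8192 encodes as log2(8192) - 10 = 3, min_chunk_bytes = 1024
 * as log2(1024) - 8 + 1 = 3 and dpte_group_bytes = 2048 as log2(2048) - 6 = 5,
 * i.e. the registers hold log2-style codes rather than raw byte counts.
 */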
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
const struct _vcs_dpi_display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
if (rq_param->yuv420)
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
/* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes
/ (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
(unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
256,
				0) / 64.0; /* 2/3 of the buffer to luma, 1/3 to chroma */
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
}
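/*
 * Worked example (editor's note, illustrative): with a 64 KB detile buffer
 * and comparable luma/chroma swath sizes, the half split above places the
 * chroma base at 65536 / 2 / 64 = 512 (in 64-byte units); the 2/3 split,
 * which gives luma two thirds of the buffer, is used instead when the stored
 * luma swath is more than 1.5x the chroma swath.
 */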
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(
rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
256,
1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(
rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
256,
1) + 256;
}
if (rq_param->yuv420) {
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { /*full 256b request */
req128_l = 0;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { /*128b request (for luma only for yuv420 8bpc) */
req128_l = 1;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
		/* Bug workaround: luma and chroma req sizes need to be the same (see DEGVIDCN10-137).
		 * TODO: Remove after RTL fix
		 */
if (req128_l == 1) {
req128_c = 1;
DTRACE("DLG: %s: bug workaround DEGVIDCN10-137", __func__);
}
		/* Note: the assumption is that the config passed in will fit into
		 * the detile buffer.
		 */
} else {
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
req128_l = 0;
else
req128_l = 1;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
}
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
DTRACE("DLG: %s: req128_l = %0d", __func__, req128_l);
DTRACE("DLG: %s: req128_c = %0d", __func__, req128_c);
DTRACE("DLG: %s: full_swath_bytes_packed_l = %0d", __func__, full_swath_bytes_packed_l);
DTRACE("DLG: %s: full_swath_bytes_packed_c = %0d", __func__, full_swath_bytes_packed_c);
}
/* Needs refactoring. */
static void dml1_rq_dlg_get_row_heights(
struct display_mode_lib *mode_lib,
unsigned int *o_dpte_row_height,
unsigned int *o_meta_row_height,
unsigned int vp_width,
unsigned int data_pitch,
int source_format,
int tiling,
int macro_tile_size,
int source_scan,
int is_chroma)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element = get_bytes_per_element(
(enum source_format_class) source_format,
is_chroma);
unsigned int log2_bytes_per_element = dml_log2(bytes_per_element);
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int log2_meta_row_height;
unsigned int log2_vmpg_bytes;
unsigned int dpte_buf_in_pte_reqs;
unsigned int log2_vmpg_height;
unsigned int log2_vmpg_width;
unsigned int log2_dpte_req_height_ptes;
unsigned int log2_dpte_req_width_ptes;
unsigned int log2_dpte_req_height;
unsigned int log2_dpte_req_width;
unsigned int log2_dpte_row_height_linear;
unsigned int log2_dpte_row_height;
unsigned int dpte_req_width;
if (surf_linear) {
blk256_width = 256;
blk256_height = 1;
} else {
get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
}
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
/* remember log rule
* "+" in log is multiply
* "-" in log is divide
* "/2" is like square root
	 * blk is vertically biased
*/
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; /* blk height of 1 */
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
/* ------- */
/* meta */
/* ------- */
log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
/* each 64b meta request for dcn is 8x8 meta elements and
* a meta element covers one 256b block of the data surface.
*/
log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
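	/* e.g. for a tiled 32bpp surface (4 bytes per element, blk256 assumed to be
	 * 8x8 elements): log2_meta_req_height = 3 + 3 = 6 and
	 * log2_meta_req_width = 6 + 8 - 2 - 6 = 6, so one 64B meta request covers a
	 * 64x64 element region (8x8 meta bytes, each mapping an 8x8 element 256B block).
	 */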
log2_meta_row_height = 0;
/* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
* calculate upper bound of the meta_row_width
*/
if (!surf_vert)
log2_meta_row_height = log2_meta_req_height;
else
log2_meta_row_height = log2_meta_req_width;
*o_meta_row_height = 1 << log2_meta_row_height;
/* ------ */
/* dpte */
/* ------ */
log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
log2_vmpg_height = 0;
log2_vmpg_width = 0;
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width_ptes = 0;
log2_dpte_req_height = 0;
log2_dpte_req_width = 0;
log2_dpte_row_height_linear = 0;
log2_dpte_row_height = 0;
dpte_req_width = 0; /* 64b dpte req width in data element */
if (surf_linear)
log2_vmpg_height = 0; /* one line high */
else
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
/* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
if (log2_blk_bytes <= log2_vmpg_bytes)
log2_dpte_req_height_ptes = 0;
else if (log2_blk_height - log2_vmpg_height >= 2)
log2_dpte_req_height_ptes = 2;
else
log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
(log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
(log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
	/* the dpte request dimensions in data elements are dpte_req_width x dpte_req_height
	 * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents
	 */
log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_width = 1 << log2_dpte_req_width;
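	/* e.g. assuming 4KB vm pages (log2_vmpg_bytes = 12), a 64KB tile block and
	 * 32bpp (blk256 height 8): log2_vmpg_height = (12 - 8) / 2 + 3 = 5 and
	 * log2_vmpg_width = 12 - 2 - 5 = 5, so one pte maps 32x32 elements;
	 * log2_blk_height = 3 + 4 = 7 selects the 2x4 pte shape, so one 64B dpte
	 * request covers a 64x128 element region.
	 */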
/* calculate pitch dpte row buffer can hold
* round the result down to a power of two.
*/
if (surf_linear) {
log2_dpte_row_height_linear = dml_floor(
dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
1);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
} else {
/* the upper bound of the dpte_row_width without dependency on viewport position follows. */
if (!surf_vert)
log2_dpte_row_height = log2_dpte_req_height;
else
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
}
/* From programming guide:
* There is a special case of saving only half of ptes returned due to buffer space limits.
* this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
* when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
*/
if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
&& log2_blk_bytes >= 16)
log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
*o_dpte_row_height = 1 << log2_dpte_row_height;
}
static void get_surf_rq_param(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
bool surf_linear;
bool surf_vert;
unsigned int bytes_per_element;
unsigned int log2_bytes_per_element;
unsigned int blk256_width;
unsigned int blk256_height;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_bytes;
unsigned int meta_blk_height;
unsigned int meta_blk_width;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
unsigned int log2_vmpg_bytes;
unsigned int dpte_buf_in_pte_reqs;
unsigned int log2_vmpg_height;
unsigned int log2_vmpg_width;
unsigned int log2_dpte_req_height_ptes;
unsigned int log2_dpte_req_width_ptes;
unsigned int log2_dpte_req_height;
unsigned int log2_dpte_req_width;
unsigned int log2_dpte_row_height_linear;
unsigned int log2_dpte_row_height;
unsigned int log2_dpte_group_width;
unsigned int dpte_row_width_ub;
unsigned int dpte_row_height;
unsigned int dpte_req_height;
unsigned int dpte_req_width;
unsigned int dpte_group_width;
unsigned int log2_dpte_group_bytes;
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
	/* TODO: check if ppe applies to both luma and chroma in the 422 case */
if (is_chroma) {
vp_width = pipe_src_param->viewport_width_c / ppe;
vp_height = pipe_src_param->viewport_height_c;
data_pitch = pipe_src_param->data_pitch_c;
meta_pitch = pipe_src_param->meta_pitch_c;
} else {
vp_width = pipe_src_param->viewport_width / ppe;
vp_height = pipe_src_param->viewport_height;
data_pitch = pipe_src_param->data_pitch;
meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
rq_sizing_param->mpte_group_bytes = 2048;
surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
surf_vert = (pipe_src_param->source_scan == dm_vert);
bytes_per_element = get_bytes_per_element(
(enum source_format_class) pipe_src_param->source_format,
is_chroma);
log2_bytes_per_element = dml_log2(bytes_per_element);
blk256_width = 0;
blk256_height = 0;
if (surf_linear) {
blk256_width = 256 / bytes_per_element;
blk256_height = 1;
} else {
get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
}
DTRACE("DLG: %s: surf_linear = %d", __func__, surf_linear);
DTRACE("DLG: %s: surf_vert = %d", __func__, surf_vert);
DTRACE("DLG: %s: blk256_width = %d", __func__, blk256_width);
DTRACE("DLG: %s: blk256_height = %d", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes =
surf_linear ? 256 : get_blk_size_bytes(
(enum source_macro_tile_size) pipe_src_param->macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
/* remember log rule
* "+" in log is multiply
* "-" in log is divide
* "/2" is like square root
	 * blk is vertically biased
*/
if (pipe_src_param->sw_mode != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; /* blk height of 1 */
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
if (!surf_vert) {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ blk256_width;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
} else {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(
vp_height - 1,
blk256_height,
1) + blk256_height;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
* bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
* bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
/* ------- */
/* meta */
/* ------- */
log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
/* each 64b meta request for dcn is 8x8 meta elements and
* a meta element covers one 256b block of the data surface.
*/
	log2_meta_req_height = log2_blk256_height + 3;	/* meta req is 8x8 bytes; each byte represents one blk256 */
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
log2_meta_row_height = 0;
meta_row_width_ub = 0;
/* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
* calculate upper bound of the meta_row_width
*/
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
/*full sized meta chunk width in unit of data elements */
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1
<< (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_bytes = 4096;
meta_blk_height = blk256_height * 64;
meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
meta_surface_bytes = meta_pitch
* (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ meta_blk_height) * bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(
meta_surface_bytes - vmpg_bytes,
8 * vmpg_bytes,
1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; /*64B mpte request */
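	/* The expression above is a ceiling division: meta_pte_req_per_frame_ub =
	 * ceil(meta_surface_bytes / (8 * vmpg_bytes)), since each 64B mpte request
	 * returns 8 ptes and each pte maps one vm page of the meta surface.
	 */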
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
DTRACE("DLG: %s: meta_blk_height = %d", __func__, meta_blk_height);
DTRACE("DLG: %s: meta_blk_width = %d", __func__, meta_blk_width);
DTRACE("DLG: %s: meta_surface_bytes = %d", __func__, meta_surface_bytes);
DTRACE("DLG: %s: meta_pte_req_per_frame_ub = %d", __func__, meta_pte_req_per_frame_ub);
DTRACE("DLG: %s: meta_pte_bytes_per_frame_ub = %d", __func__, meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
/* ------ */
/* dpte */
/* ------ */
log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
log2_vmpg_height = 0;
log2_vmpg_width = 0;
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width_ptes = 0;
log2_dpte_req_height = 0;
log2_dpte_req_width = 0;
log2_dpte_row_height_linear = 0;
log2_dpte_row_height = 0;
log2_dpte_group_width = 0;
dpte_row_width_ub = 0;
dpte_row_height = 0;
dpte_req_height = 0; /* 64b dpte req height in data element */
dpte_req_width = 0; /* 64b dpte req width in data element */
dpte_group_width = 0;
log2_dpte_group_bytes = 0;
log2_dpte_group_length = 0;
if (surf_linear)
log2_vmpg_height = 0; /* one line high */
else
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
/* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
if (log2_blk_bytes <= log2_vmpg_bytes)
log2_dpte_req_height_ptes = 0;
else if (log2_blk_height - log2_vmpg_height >= 2)
log2_dpte_req_height_ptes = 2;
else
log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
/* Ensure we only have the 3 shapes */
ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
(log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
(log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
	/* The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
	 * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents.
	 * That depends on the pte shape (i.e. 8x1, 4x2, 2x4).
	 */
log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
/* calculate pitch dpte row buffer can hold
* round the result down to a power of two.
*/
if (surf_linear) {
log2_dpte_row_height_linear = dml_floor(
dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
1);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
/* For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
* the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
*/
dpte_row_width_ub = dml_round_to_multiple(
data_pitch * dpte_row_height - 1,
dpte_req_width,
1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
		/* for tiled mode, row height is the same as req height and the row stores up to the vp size upper bound */
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
}
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64;
/* From programming guide:
* There is a special case of saving only half of ptes returned due to buffer space limits.
* this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
* when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
*/
if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
&& log2_blk_bytes >= 16) {
log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
}
/* the dpte_group_bytes is reduced for the specific case of vertical
* access of a tile surface that has dpte request of 8x1 ptes.
*/
	if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) /* reduced; in this case there will be page faults within a group */
rq_sizing_param->dpte_group_bytes = 512;
else
/*full size */
rq_sizing_param->dpte_group_bytes = 2048;
/*since pte request size is 64byte, the number of data pte requests per full sized group is as follows. */
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; /*length in 64b requests */
/* full sized data pte group width in elements */
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
dpte_group_width = 1 << log2_dpte_group_width;
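	/* For example, with the default 2048B group and 64B pte requests a group
	 * holds 2048 / 64 = 32 requests, so dpte_group_width is 32 dpte request
	 * widths (or heights for vertical access); the reduced 512B group holds 8.
	 */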
/* since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
* the upper bound for the dpte groups per row is as follows.
*/
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
(double) dpte_row_width_ub / dpte_group_width,
1);
dml1_rq_dlg_get_row_heights(
mode_lib,
&func_dpte_row_height,
&func_meta_row_height,
vp_width,
data_pitch,
pipe_src_param->source_format,
pipe_src_param->sw_mode,
pipe_src_param->macro_tile_size,
pipe_src_param->source_scan,
is_chroma);
/* Just a check to make sure this function and the new one give the same
* result. The standalone get_row_heights() function is based off of the
* code in this function so the same changes need to be made to both.
*/
if (rq_dlg_param->meta_row_height != func_meta_row_height) {
DTRACE(
"MISMATCH: rq_dlg_param->meta_row_height = %d",
rq_dlg_param->meta_row_height);
DTRACE("MISMATCH: func_meta_row_height = %d", func_meta_row_height);
ASSERT(0);
}
if (rq_dlg_param->dpte_row_height != func_dpte_row_height) {
DTRACE(
"MISMATCH: rq_dlg_param->dpte_row_height = %d",
rq_dlg_param->dpte_row_height);
DTRACE("MISMATCH: func_dpte_row_height = %d", func_dpte_row_height);
ASSERT(0);
}
}
void dml1_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
/* get param for luma surface */
rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
|| pipe_src_param->source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
&(rq_param->sizing.rq_l),
&(rq_param->dlg.rq_l),
&(rq_param->misc.rq_l),
pipe_src_param,
0);
if (is_dual_plane((enum source_format_class) pipe_src_param->source_format)) {
/* get param for chroma surface */
get_surf_rq_param(
mode_lib,
&(rq_param->sizing.rq_c),
&(rq_param->dlg.rq_c),
&(rq_param->misc.rq_c),
pipe_src_param,
1);
}
/* calculate how to split the det buffer space between luma and chroma */
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
print__rq_params_st(mode_lib, rq_param);
}
/* Note: currently taken in as is.
 * It would be nice to decouple the code from the hw register implementation and to
 * extract the code that is repeated for luma and chroma.
 */
void dml1_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param,
const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param,
const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool iflip_en)
{
/* Timing */
unsigned int htotal = e2e_pipe_param->pipe.dest.htotal;
unsigned int hblank_end = e2e_pipe_param->pipe.dest.hblank_end;
unsigned int vblank_start = e2e_pipe_param->pipe.dest.vblank_start;
unsigned int vblank_end = e2e_pipe_param->pipe.dest.vblank_end;
bool interlaced = e2e_pipe_param->pipe.dest.interlaced;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
double pclk_freq_in_mhz = e2e_pipe_param->pipe.dest.pixel_rate_mhz;
double refclk_freq_in_mhz = e2e_pipe_param->clks_cfg.refclk_mhz;
double dppclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dppclk_mhz;
double dispclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dispclk_mhz;
double ref_freq_to_pix_freq;
double prefetch_xy_calc_in_dcfclk;
double min_dcfclk_mhz;
double t_calc_us;
double min_ttu_vblank;
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dcc_en;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int bytes_per_element_l;
unsigned int bytes_per_element_c;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
unsigned int htaps_l;
unsigned int htaps_c;
double hratios_l;
double hratios_c;
double vratio_l;
double vratio_c;
double line_time_in_us;
double vinit_l;
double vinit_c;
double vinit_bot_l;
double vinit_bot_c;
unsigned int swath_height_l;
unsigned int swath_width_ub_l;
unsigned int dpte_bytes_per_row_ub_l;
unsigned int dpte_groups_per_row_ub_l;
unsigned int meta_pte_bytes_per_frame_ub_l;
unsigned int meta_bytes_per_row_ub_l;
unsigned int swath_height_c;
unsigned int swath_width_ub_c;
unsigned int dpte_bytes_per_row_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int pixel_rate_delay_subtotal;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double line_wait;
double line_o;
double line_setup;
double line_calc;
double dst_y_prefetch;
double t_pre_us;
unsigned int vm_bytes;
unsigned int meta_row_bytes;
unsigned int max_num_sw_l;
unsigned int max_num_sw_c;
unsigned int max_partial_sw_l;
unsigned int max_partial_sw_c;
double max_vinit_l;
double max_vinit_c;
unsigned int lsw_l;
unsigned int lsw_c;
unsigned int sw_bytes_ub_l;
unsigned int sw_bytes_ub_c;
unsigned int sw_bytes;
unsigned int dpte_row_bytes;
double prefetch_bw;
double flip_bw;
double t_vm_us;
double t_r0_us;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double min_dst_y_per_vm_vblank;
double min_dst_y_per_row_vblank;
double lsw;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
unsigned int full_recout_width;
double hratios_cur0;
unsigned int cur0_src_width;
enum cursor_bpp cur0_bpp;
unsigned int cur0_req_size;
unsigned int cur0_req_width;
double cur0_width_ub;
double cur0_req_per_width;
double hactive_cur0;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
DTRACE("DLG: %s: cstate_en = %d", __func__, cstate_en);
DTRACE("DLG: %s: pstate_en = %d", __func__, pstate_en);
DTRACE("DLG: %s: vm_en = %d", __func__, vm_en);
DTRACE("DLG: %s: iflip_en = %d", __func__, iflip_en);
/* ------------------------- */
/* Section 1.5.2.1: OTG dependent Params */
/* ------------------------- */
DTRACE("DLG: %s: dppclk_freq_in_mhz = %3.2f", __func__, dppclk_freq_in_mhz);
DTRACE("DLG: %s: dispclk_freq_in_mhz = %3.2f", __func__, dispclk_freq_in_mhz);
DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
DTRACE("DLG: %s: interlaced = %d", __func__, interlaced);
ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq =
(unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
* dml_pow(2, 8));
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
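	/* Fixed-point packing used above (implied by the scale factors and ASSERTs):
	 * ref_freq_to_pix_freq is programmed with 19 fractional bits (x 2^19),
	 * refcyc_per_htotal with 8 fractional bits, and refcyc_h_blank_end is an
	 * integer refclk count that must fit in a 13-bit field.
	 */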
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param->t_urg_wm_us;
if (cstate_en)
min_ttu_vblank = dml_max(dlg_sys_param->t_sr_wm_us, min_ttu_vblank);
if (pstate_en)
min_ttu_vblank = dml_max(dlg_sys_param->t_mclk_wm_us, min_ttu_vblank);
min_ttu_vblank = min_ttu_vblank + t_calc_us;
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
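	/* min_ttu_vblank is the worst-case TTU watermark (urgent, plus the self-refresh
	 * and mclk-change watermarks when those power states are enabled) plus the
	 * prefetch calc time t_calc; min_dst_y_ttu_vblank converts that time into lines.
	 */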
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ min_dst_y_ttu_vblank) * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
DTRACE("DLG: %s: min_dcfclk_mhz = %3.2f", __func__, min_dcfclk_mhz);
DTRACE("DLG: %s: min_ttu_vblank = %3.2f", __func__, min_ttu_vblank);
DTRACE(
"DLG: %s: min_dst_y_ttu_vblank = %3.2f",
__func__,
min_dst_y_ttu_vblank);
DTRACE("DLG: %s: t_calc_us = %3.2f", __func__, t_calc_us);
DTRACE(
"DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x",
__func__,
disp_dlg_regs->min_dst_y_next_start);
DTRACE(
"DLG: %s: ref_freq_to_pix_freq = %3.2f",
__func__,
ref_freq_to_pix_freq);
/* ------------------------- */
/* Section 1.5.2.2: Prefetch, Active and TTU */
/* ------------------------- */
/* Prefetch Calc */
/* Source */
dcc_en = e2e_pipe_param->pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param->pipe.src.source_format);
mode_422 = 0; /* TODO */
	access_dir = (e2e_pipe_param->pipe.src.source_scan == dm_vert); /* vp access direction: horizontally or vertically accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param->pipe.src.source_format,
0);
bytes_per_element_c = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param->pipe.src.source_format,
1);
vp_height_l = e2e_pipe_param->pipe.src.viewport_height;
vp_width_l = e2e_pipe_param->pipe.src.viewport_width;
vp_height_c = e2e_pipe_param->pipe.src.viewport_height_c;
vp_width_c = e2e_pipe_param->pipe.src.viewport_width_c;
/* Scaling */
htaps_l = e2e_pipe_param->pipe.scale_taps.htaps;
htaps_c = e2e_pipe_param->pipe.scale_taps.htaps_c;
hratios_l = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
hratios_c = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio_c;
vratio_l = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio;
vratio_c = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio_c;
line_time_in_us = (htotal / pclk_freq_in_mhz);
vinit_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit;
vinit_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_c;
vinit_bot_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot;
vinit_bot_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot_c;
swath_height_l = rq_dlg_param->rq_l.swath_height;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
swath_height_c = rq_dlg_param->rq_c.swath_height;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
vupdate_offset = e2e_pipe_param->pipe.dest.vupdate_offset;
vupdate_width = e2e_pipe_param->pipe.dest.vupdate_width;
vready_offset = e2e_pipe_param->pipe.dest.vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
vstartup_start = e2e_pipe_param->pipe.dest.vstartup_start;
if (interlaced)
vstartup_start = vstartup_start / 2;
if (vstartup_start >= min_vblank) {
DTRACE(
"WARNING_DLG: %s: vblank_start=%d vblank_end=%d",
__func__,
vblank_start,
vblank_end);
DTRACE(
"WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
__func__,
vstartup_start,
min_vblank);
min_vblank = vstartup_start + 1;
DTRACE(
"WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
__func__,
vstartup_start,
min_vblank);
}
dst_x_after_scaler = 0;
dst_y_after_scaler = 0;
if (e2e_pipe_param->pipe.src.is_hsplit)
dst_x_after_scaler = pixel_rate_delay_subtotal
+ e2e_pipe_param->pipe.dest.recout_width;
else
dst_x_after_scaler = pixel_rate_delay_subtotal;
if (e2e_pipe_param->dout.output_format == dm_420)
dst_y_after_scaler = 1;
else
dst_y_after_scaler = 0;
if (dst_x_after_scaler >= htotal) {
dst_x_after_scaler = dst_x_after_scaler - htotal;
dst_y_after_scaler = dst_y_after_scaler + 1;
}
DTRACE("DLG: %s: htotal = %d", __func__, htotal);
DTRACE(
"DLG: %s: pixel_rate_delay_subtotal = %d",
__func__,
pixel_rate_delay_subtotal);
DTRACE("DLG: %s: dst_x_after_scaler = %d", __func__, dst_x_after_scaler);
DTRACE("DLG: %s: dst_y_after_scaler = %d", __func__, dst_y_after_scaler);
line_wait = mode_lib->soc.urgent_latency_us;
if (cstate_en)
line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
if (pstate_en)
line_wait = dml_max(
mode_lib->soc.dram_clock_change_latency_us
+ mode_lib->soc.urgent_latency_us,
line_wait);
line_wait = line_wait / line_time_in_us;
line_o = (double) dst_y_after_scaler + dst_x_after_scaler / (double) htotal;
line_setup = (double) (vupdate_offset + vupdate_width + vready_offset) / (double) htotal;
line_calc = t_calc_us / line_time_in_us;
DTRACE(
"DLG: %s: soc.sr_enter_plus_exit_time_us = %3.2f",
__func__,
(double) mode_lib->soc.sr_enter_plus_exit_time_us);
DTRACE(
"DLG: %s: soc.dram_clock_change_latency_us = %3.2f",
__func__,
(double) mode_lib->soc.dram_clock_change_latency_us);
DTRACE(
"DLG: %s: soc.urgent_latency_us = %3.2f",
__func__,
mode_lib->soc.urgent_latency_us);
DTRACE("DLG: %s: swath_height_l = %d", __func__, swath_height_l);
if (dual_plane)
DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
DTRACE("DLG: %s: vready_offset = %d", __func__, vready_offset);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, line_time_in_us);
DTRACE("DLG: %s: line_wait = %3.2f", __func__, line_wait);
DTRACE("DLG: %s: line_o = %3.2f", __func__, line_o);
DTRACE("DLG: %s: line_setup = %3.2f", __func__, line_setup);
DTRACE("DLG: %s: line_calc = %3.2f", __func__, line_calc);
dst_y_prefetch = ((double) min_vblank - 1.0)
- (line_setup + line_calc + line_wait + line_o);
DTRACE("DLG: %s: dst_y_prefetch (before rnd) = %3.2f", __func__, dst_y_prefetch);
ASSERT(dst_y_prefetch >= 2.0);
dst_y_prefetch = dml_floor(4.0 * (dst_y_prefetch + 0.125), 1) / 4;
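	/* dml_floor(4.0 * (x + 0.125), 1) / 4 rounds x to the nearest quarter line,
	 * matching the 2 fractional bits used when packing dst_y_prefetch below.
	 */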
DTRACE("DLG: %s: dst_y_prefetch (after rnd) = %3.2f", __func__, dst_y_prefetch);
t_pre_us = dst_y_prefetch * line_time_in_us;
vm_bytes = 0;
meta_row_bytes = 0;
if (dcc_en && vm_en)
vm_bytes = meta_pte_bytes_per_frame_ub_l;
if (dcc_en)
meta_row_bytes = meta_bytes_per_row_ub_l;
max_num_sw_l = 0;
max_num_sw_c = 0;
max_partial_sw_l = 0;
max_partial_sw_c = 0;
max_vinit_l = interlaced ? dml_max(vinit_l, vinit_bot_l) : vinit_l;
max_vinit_c = interlaced ? dml_max(vinit_c, vinit_bot_c) : vinit_c;
get_swath_need(mode_lib, &max_num_sw_l, &max_partial_sw_l, swath_height_l, max_vinit_l);
if (dual_plane)
get_swath_need(
mode_lib,
&max_num_sw_c,
&max_partial_sw_c,
swath_height_c,
max_vinit_c);
lsw_l = max_num_sw_l * swath_height_l + max_partial_sw_l;
lsw_c = max_num_sw_c * swath_height_c + max_partial_sw_c;
sw_bytes_ub_l = lsw_l * swath_width_ub_l * bytes_per_element_l;
sw_bytes_ub_c = lsw_c * swath_width_ub_c * bytes_per_element_c;
sw_bytes = 0;
dpte_row_bytes = 0;
if (vm_en) {
if (dual_plane)
dpte_row_bytes = dpte_bytes_per_row_ub_l + dpte_bytes_per_row_ub_c;
else
dpte_row_bytes = dpte_bytes_per_row_ub_l;
} else {
dpte_row_bytes = 0;
}
if (dual_plane)
sw_bytes = sw_bytes_ub_l + sw_bytes_ub_c;
else
sw_bytes = sw_bytes_ub_l;
DTRACE("DLG: %s: sw_bytes_ub_l = %d", __func__, sw_bytes_ub_l);
DTRACE("DLG: %s: sw_bytes_ub_c = %d", __func__, sw_bytes_ub_c);
DTRACE("DLG: %s: sw_bytes = %d", __func__, sw_bytes);
DTRACE("DLG: %s: vm_bytes = %d", __func__, vm_bytes);
DTRACE("DLG: %s: meta_row_bytes = %d", __func__, meta_row_bytes);
DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param->total_flip_bw)
/ (double) dlg_sys_param->total_flip_bytes;
t_vm_us = line_time_in_us / 4.0;
if (vm_en && dcc_en) {
t_vm_us = dml_max(
dlg_sys_param->t_extra_us,
dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
if (iflip_en && !dual_plane) {
t_vm_us = dml_max(mode_lib->soc.urgent_latency_us, t_vm_us);
if (flip_bw > 0.)
t_vm_us = dml_max(vm_bytes / flip_bw, t_vm_us);
}
}
t_r0_us = dml_max(dlg_sys_param->t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
if (vm_en || dcc_en) {
t_r0_us = dml_max(
(double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
dlg_sys_param->t_extra_us);
t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
if (iflip_en && !dual_plane) {
t_r0_us = dml_max(mode_lib->soc.urgent_latency_us * 2.0, t_r0_us);
if (flip_bw > 0.)
t_r0_us = dml_max(
(dpte_row_bytes + meta_row_bytes) / flip_bw,
t_r0_us);
}
}
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; /* in terms of line */
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; /* in terms of refclk */
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
DTRACE(
"DLG: %s: disp_dlg_regs->dst_y_after_scaler = 0x%0x",
__func__,
disp_dlg_regs->dst_y_after_scaler);
DTRACE(
"DLG: %s: disp_dlg_regs->refcyc_x_after_scaler = 0x%0x",
__func__,
disp_dlg_regs->refcyc_x_after_scaler);
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
DTRACE(
"DLG: %s: disp_dlg_regs->dst_y_prefetch = %d",
__func__,
disp_dlg_regs->dst_y_prefetch);
dst_y_per_vm_vblank = 0.0;
dst_y_per_row_vblank = 0.0;
dst_y_per_vm_vblank = t_vm_us / line_time_in_us;
dst_y_per_vm_vblank = dml_floor(4.0 * (dst_y_per_vm_vblank + 0.125), 1) / 4.0;
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
dst_y_per_row_vblank = t_r0_us / line_time_in_us;
dst_y_per_row_vblank = dml_floor(4.0 * (dst_y_per_row_vblank + 0.125), 1) / 4.0;
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
DTRACE("DLG: %s: lsw_l = %d", __func__, lsw_l);
DTRACE("DLG: %s: lsw_c = %d", __func__, lsw_c);
DTRACE("DLG: %s: dpte_bytes_per_row_ub_l = %d", __func__, dpte_bytes_per_row_ub_l);
DTRACE("DLG: %s: dpte_bytes_per_row_ub_c = %d", __func__, dpte_bytes_per_row_ub_c);
DTRACE("DLG: %s: prefetch_bw = %3.2f", __func__, prefetch_bw);
DTRACE("DLG: %s: flip_bw = %3.2f", __func__, flip_bw);
DTRACE("DLG: %s: t_pre_us = %3.2f", __func__, t_pre_us);
DTRACE("DLG: %s: t_vm_us = %3.2f", __func__, t_vm_us);
DTRACE("DLG: %s: t_r0_us = %3.2f", __func__, t_r0_us);
DTRACE("DLG: %s: dst_y_per_vm_vblank = %3.2f", __func__, dst_y_per_vm_vblank);
DTRACE("DLG: %s: dst_y_per_row_vblank = %3.2f", __func__, dst_y_per_row_vblank);
DTRACE("DLG: %s: dst_y_prefetch = %3.2f", __func__, dst_y_prefetch);
min_dst_y_per_vm_vblank = 8.0;
min_dst_y_per_row_vblank = 16.0;
if (htotal <= 75) {
min_vblank = 300;
min_dst_y_per_vm_vblank = 100.0;
min_dst_y_per_row_vblank = 100.0;
}
ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
DTRACE("DLG: %s: lsw = %3.2f", __func__, lsw);
vratio_pre_l = get_vratio_pre(
mode_lib,
max_num_sw_l,
max_partial_sw_l,
swath_height_l,
max_vinit_l,
lsw);
vratio_pre_c = 1.0;
if (dual_plane)
vratio_pre_c = get_vratio_pre(
mode_lib,
max_num_sw_c,
max_partial_sw_c,
swath_height_c,
max_vinit_c,
lsw);
DTRACE("DLG: %s: vratio_pre_l=%3.2f", __func__, vratio_pre_l);
DTRACE("DLG: %s: vratio_pre_c=%3.2f", __func__, vratio_pre_c);
ASSERT(vratio_pre_l <= 4.0);
if (vratio_pre_l >= 4.0)
disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 21) - 1;
else
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
ASSERT(vratio_pre_c <= 4.0);
if (vratio_pre_c >= 4.0)
disp_dlg_regs->vratio_prefetch_c = (unsigned int) dml_pow(2, 21) - 1;
else
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_pte_group_vblank_c =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
/* Active */
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
/ (double) vratio_c * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_c < (unsigned int) dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; /* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c
/ (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; /* *2 for 2 pixel per element */
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratios_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratios_l * 2.0;
} else {
if (hratios_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratios_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (htaps_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratios_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratios_c * 2.0;
} else {
if (hratios_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratios_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
refcyc_per_req_delivery_pre_cur0 = 0.;
refcyc_per_req_delivery_cur0 = 0.;
full_recout_width = 0;
if (e2e_pipe_param->pipe.src.is_hsplit) {
if (e2e_pipe_param->pipe.dest.full_recout_width == 0) {
DTRACE("DLG: %s: Warningfull_recout_width not set in hsplit mode", __func__);
full_recout_width = e2e_pipe_param->pipe.dest.recout_width * 2; /* assume half split for dcn1 */
} else
full_recout_width = e2e_pipe_param->pipe.dest.full_recout_width;
} else
full_recout_width = e2e_pipe_param->pipe.dest.recout_width;
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); /* per line */
refcyc_per_line_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); /* per line */
DTRACE("DLG: %s: full_recout_width = %d", __func__, full_recout_width);
DTRACE("DLG: %s: hscale_pixel_rate_l = %3.2f", __func__, hscale_pixel_rate_l);
DTRACE(
"DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f",
__func__,
refcyc_per_line_delivery_pre_l);
DTRACE(
"DLG: %s: refcyc_per_line_delivery_l = %3.2f",
__func__,
refcyc_per_line_delivery_l);
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
refcyc_per_line_delivery_pre_l,
1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
refcyc_per_line_delivery_l,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); /* per line */
refcyc_per_line_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); /* per line */
DTRACE(
"DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f",
__func__,
refcyc_per_line_delivery_pre_c);
DTRACE(
"DLG: %s: refcyc_per_line_delivery_c = %3.2f",
__func__,
refcyc_per_line_delivery_c);
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
refcyc_per_line_delivery_pre_c,
1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
refcyc_per_line_delivery_c,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
}
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
/* TTU - Luma / Chroma */
if (access_dir) { /* vertical access */
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); /* per req */
refcyc_per_req_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); /* per req */
DTRACE(
"DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f",
__func__,
refcyc_per_req_delivery_pre_l);
DTRACE(
"DLG: %s: refcyc_per_req_delivery_l = %3.2f",
__func__,
refcyc_per_req_delivery_l);
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
* dml_pow(2, 10));
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); /* per req */
refcyc_per_req_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
full_recout_width,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); /* per req */
DTRACE(
"DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f",
__func__,
refcyc_per_req_delivery_pre_c);
DTRACE(
"DLG: %s: refcyc_per_req_delivery_c = %3.2f",
__func__,
refcyc_per_req_delivery_c);
disp_ttu_regs->refcyc_per_req_delivery_pre_c =
(unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
* dml_pow(2, 10));
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
/* TTU - Cursor */
hratios_cur0 = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
cur0_src_width = e2e_pipe_param->pipe.src.cur0_src_width; /* cursor source width */
cur0_bpp = (enum cursor_bpp) e2e_pipe_param->pipe.src.cur0_bpp;
cur0_req_size = 0;
cur0_req_width = 0;
cur0_width_ub = 0.0;
cur0_req_per_width = 0.0;
hactive_cur0 = 0.0;
ASSERT(cur0_src_width <= 256);
if (cur0_src_width > 0) {
unsigned int cur0_bit_per_pixel = 0;
if (cur0_bpp == dm_cur_2bit) {
cur0_req_size = 64; /* byte */
cur0_bit_per_pixel = 2;
} else { /* 32bit */
cur0_bit_per_pixel = 32;
if (cur0_src_width >= 1 && cur0_src_width <= 16)
cur0_req_size = 64;
else if (cur0_src_width >= 17 && cur0_src_width <= 31)
cur0_req_size = 128;
else
cur0_req_size = 256;
}
cur0_req_width = (double) cur0_req_size / ((double) cur0_bit_per_pixel / 8.0);
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
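		/* e.g. a 64 pixel wide 32-bit cursor uses 256B requests, so
		 * cur0_req_width = 256 / 4 = 64 pixels, cur0_width_ub = 64 and
		 * cur0_req_per_width = 1 request per cursor line.
		 */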
hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
/ (double) cur0_req_per_width;
} else {
refcyc_per_req_delivery_pre_cur0 = (double) refclk_freq_in_mhz
* (double) cur0_src_width / hscale_pixel_rate_l
/ (double) cur0_req_per_width;
}
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
ASSERT(refcyc_per_req_delivery_pre_cur0 < dml_pow(2, 13));
if (vratio_l <= 1.0) {
refcyc_per_req_delivery_cur0 = hactive_cur0 * ref_freq_to_pix_freq
/ (double) cur0_req_per_width;
} else {
refcyc_per_req_delivery_cur0 = (double) refclk_freq_in_mhz
* (double) cur0_src_width / hscale_pixel_rate_l
/ (double) cur0_req_per_width;
}
DTRACE("DLG: %s: cur0_req_width = %d", __func__, cur0_req_width);
DTRACE(
"DLG: %s: cur0_width_ub = %3.2f",
__func__,
cur0_width_ub);
DTRACE(
"DLG: %s: cur0_req_per_width = %3.2f",
__func__,
cur0_req_per_width);
DTRACE(
"DLG: %s: hactive_cur0 = %3.2f",
__func__,
hactive_cur0);
DTRACE(
"DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f",
__func__,
refcyc_per_req_delivery_pre_cur0);
DTRACE(
"DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f",
__func__,
refcyc_per_req_delivery_cur0);
disp_ttu_regs->refcyc_per_req_delivery_cur0 =
(unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
ASSERT(refcyc_per_req_delivery_cur0 < dml_pow(2, 13));
} else {
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = 0;
disp_ttu_regs->refcyc_per_req_delivery_cur0 = 0;
}
/* TTU - Misc */
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
* ref_freq_to_pix_freq);
ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c |
/*
* Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn303/dcn303_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn303_fpu.h"
struct _vcs_dpi_ip_params_st dcn3_03_ip = {
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 2,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 2,
.pte_chunk_size_kbytes = 2, // ?
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 0, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 2,
.max_num_dpp = 2,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 1,
.odm_combine_4to1_supported = false,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 562.0,
.dppclk_mhz = 300.0,
.phyclk_mhz = 300.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 405.6,
},
},
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 35.5,
.sr_enter_plus_exit_time_us = 40,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 156,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3650,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};
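/* Estimate the DCFCLK and FCLK needed to sustain a given UCLK: take the smaller
 * of the two DRAM-limited bandwidths (derated by the normal DRAM and SDP
 * utilization percentages), then divide by the fabric return width for FCLK and
 * by the DCN return bus width for DCFCLK, both derated by the SDP percentage.
 */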
static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes *
(dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
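/*
 * Update the DCN3.03 bounding box from the clock table in bw_params:
 * pick up VRAM channel count/width from the BIOS when available, find the
 * max DCFCLK/DISPCLK/DPPCLK/PHYCLK across the table, merge the fixed DCFCLK
 * STA targets with the optimal DCFCLK computed per UCLK state into a single
 * voltage-state table, and finally re-initialize DML with the updated box.
 */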
void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if (bw_params->clk_table.entries[0].memclk_mhz) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
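/* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */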
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
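/* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */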
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
/* Update size of array since we "removed" duplicates */
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
/* Calculate optimal dcfclk for each uclk */
for (i = 0; i < num_uclk_states; i++) {
dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz)
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
/* Calculate optimal uclk for each dcfclk sta target */
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] =
bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_03_soc.num_states = num_states;
for (i = 0; i < dcn3_03_soc.num_states; i++) {
dcn3_03_soc.clock_limits[i].state = i;
dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params; always fill from dcn3_03_soc.clock_limits[0] */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
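/*
 * For narrow memory configurations (4 channels or fewer), clamp DCFCLK and
 * FCLK to 100 MHz for states whose DRAM speed falls in the 1500-1700 MT/s range.
 */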
if (dcn3_03_soc.num_chans <= 4) {
for (i = 0; i < dcn3_03_soc.num_states; i++) {
if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
break;
if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
}
}
}
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
}
}
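/*
 * Override the default DCN3.03 bounding-box latencies with the values
 * reported in the VBIOS SOC bounding-box info, when they are non-zero.
 */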
void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
{
dc_assert_fp_enabled();
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_03_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_03_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_03_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn302/dcn302_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn302_fpu.h"
struct _vcs_dpi_ip_params_st dcn3_02_ip = {
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 5,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 2,
.pte_chunk_size_kbytes = 2, // ?
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 0, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 5,
.max_num_dpp = 5,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 1,
.odm_combine_4to1_supported = true,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 562.0,
.dppclk_mhz = 300.0,
.phyclk_mhz = 300.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 405.6,
},
},
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 26.5,
.sr_enter_plus_exit_time_us = 31,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 156,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3650,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};
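/*
 * Same derivation as dcn303_get_optimal_dcfclk_fclk_for_uclk(): optimal
 * DCFCLK/FCLK for a given DRAM speed from the lesser of the DRAM-limited and
 * SDP-limited bandwidths.
 */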
static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
dcn3_02_soc.dram_channel_width_bytes *
(dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
dcn3_02_soc.dram_channel_width_bytes *
(dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes *
(dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_02_soc.return_bus_width_bytes *
(dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
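/*
 * Build the DCN3.02 voltage-state table from bw_params: derive the max
 * clocks, merge the DCFCLK STA targets with the per-UCLK optimal DCFCLK,
 * then re-initialize DML with the updated bounding box.
 */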
void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if (bw_params->clk_table.entries[0].memclk_mhz) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
/* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
/* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
/* Update size of array since we "removed" duplicates */
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
/* Calculate optimal dcfclk for each uclk */
for (i = 0; i < num_uclk_states; i++) {
dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz)
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
/* Calculate optimal uclk for each dcfclk sta target */
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_02_soc.num_states = num_states;
for (i = 0; i < dcn3_02_soc.num_states; i++) {
dcn3_02_soc.clock_limits[i].state = i;
dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params; always fill from dcn3_02_soc.clock_limits[0] */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
}
}
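/* Apply non-zero VBIOS-provided latencies to the DCN3.02 bounding box. */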
void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
{
dc_assert_fp_enabled();
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_02_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_02_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_02_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "../display_mode_lib.h"
#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parsable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
// Move these to ip parameters/constants
// At which vstartup the DML start to try if the mode can be supported
#define __DML_VBA_MIN_VSTARTUP__ 9
// Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_ARB_TO_RET_DELAY__ (7 + 95)
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
double DCFCLKDeepSleep;
unsigned int DPPPerPlane;
bool ScalerEnabled;
double VRatio;
double VRatioChroma;
enum scan_direction_class SourceScan;
unsigned int BlockWidth256BytesY;
unsigned int BlockHeight256BytesY;
unsigned int BlockWidth256BytesC;
unsigned int BlockHeight256BytesC;
unsigned int InterlaceEnable;
unsigned int NumberOfCursors;
unsigned int VBlank;
unsigned int HTotal;
unsigned int DCCEnable;
bool ODMCombineIsEnabled;
enum source_format_class SourcePixelFormat;
int BytePerPixelY;
int BytePerPixelC;
bool ProgressiveToInterlaceUnitInOPP;
} Pipe;
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(struct display_mode_lib *mode_lib);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output);
static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output);
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
double *TSetup,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame);
static double CalculateTWait(unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatency, double SREnterPlusExitTime);
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
int WritebackDestinationWidth,
int WritebackDestinationHeight,
int WritebackSourceHeight,
unsigned int HTotal);
static void CalculateVupdateAndDynamicMetadataParameters(
int MaxInterDCNTileRepeaters,
double DPPCLK,
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
int HTotal,
int VBlank,
int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *TSetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
double DCFCLK,
double ReturnBW,
double UrgentLatency,
double ExtraLatency,
double SOCCLK,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep);
static void CalculateUrgentBurstFactor(
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding);
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][DC__NUM_CURSOR__MAX],
unsigned int CursorBPP[][DC__NUM_CURSOR__MAX],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[]);
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[]);
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[]);
static void CalculateStutterEfficiency(
struct display_mode_lib *mode_lib,
int CompressedBufferSizeInkByte,
bool UnboundedRequestEnabled,
int ConfigReturnBufferSizeInKByte,
int MetaFIFOSizeInKEntries,
int ZeroSizeBufferEntries,
int NumberOfActivePlanes,
int ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double COMPBUF_RESERVED_SPACE_64B,
double COMPBUF_RESERVED_SPACE_ZS,
double SRExitTime,
double SRExitZ8Time,
bool SynchronizedVBlank,
double Z8StutterEnterPlusExitWatermark,
double StutterEnterPlusExitWatermark,
bool ProgressiveToInterlaceUnitInOPP,
bool Interlace[],
double MinTTUVBlank[],
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double NetDCCRateLuma[],
double NetDCCRateChroma[],
double DCCFractionOfZeroSizeRequestsLuma[],
double DCCFractionOfZeroSizeRequestsChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
int *NumberOfStutterBurstsPerFrame,
double *Z8StutterEfficiencyNotIncludingVBlank,
double *Z8StutterEfficiency,
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod);
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByte[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport);
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[]);
static double CalculateExtraLatency(
int RoundTripPingLatencyCycles,
int ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateExtraLatencyBytes(
int ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClockSingle);
static void CalculateUnboundedRequestAndCompressedBufferSize(
unsigned int DETBufferSizeInKByte,
int ConfigReturnBufferSizeInKByte,
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
int TotalActiveDPP,
bool NoChromaPlanes,
int MaxNumDPP,
int CompressedBufferSegmentSizeInkByteFinal,
enum output_encoder_class *Output,
bool *UnboundedRequestEnabled,
int *CompressedBufferSizeInkByte);
static bool UnboundedRequest(enum unbounded_requesting_policy UseUnboundedRequestingFinal, int TotalNumberOfActiveDPP, bool NoChroma, enum output_encoder_class Output);
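/*
 * Top-level recalculation entry point: resolve mode support and system
 * configuration, adjust pixel clocks for progressive-to-interlace, derive
 * the display pipe configuration, then compute DISPCLK/DPPCLK/DCFCLKDeepSleep,
 * prefetch parameters, watermarks and performance.
 */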
void dml31_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
DisplayPipeConfiguration(mode_lib);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Calling DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation\n", __func__);
#endif
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
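// Illustrative example (hypothetical inputs): bpc = 8, BPP = 8.0 with 444 output
// gives pixelsPerClock = 1, initalXmitDelay = round(8192/2/8/1) = 512 and D = 81.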
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock = 0, lstall, D, initalXmitDelay, w, s, ix, wx, P, l0, a, ax, L, Delay, pixels;
if (pixelFormat == dm_420)
pixelsPerClock = 2;
else if (pixelFormat == dm_444)
pixelsPerClock = 1;
else if (pixelFormat == dm_n422)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / BPP / pixelsPerClock);
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_420 || pixelFormat == dm_444 || pixelFormat == dm_n422)
s = 0;
else
s = 1;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
P = 3 * wx - w;
l0 = ix / w;
a = ix + P * l0;
ax = (a + 2) / 3 + D + 6 + 1;
L = (ax + wx - 1) / wx;
if ((ix % w) == 0 && P != 0)
lstall = 1;
else
lstall = 0;
Delay = L * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
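/*
 * Sum of the fixed per-stage delays through the DSC path (sfr, dsccif, dscc
 * input/output FIFOs and serializers, sft); the per-stage contributions
 * depend on the output pixel format, as annotated below.
 */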
static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
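/*
 * Work out the prefetch schedule for one pipe: the VM and row fetch times
 * (Tvm, Tr0), the destination lines available for prefetch, the required
 * prefetch bandwidth and VRatioPrefetch for luma/chroma, and whether dynamic
 * metadata can be transmitted in time. The boolean return flags an error
 * (e.g. DPPCLK or DISPCLK being zero).
 */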
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
double *TSetup,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
double Tr0_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
double Tr0_trips;
double Tvm_trips_rounded;
double Tr0_trips_rounded;
double Lsw_oto;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
double prefetch_sw_bytes;
double bytes_pp;
double dep_bytes;
int max_vratio_pre = 4;
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
double max_Tsw = 0;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable=%d HostVMEnable=%d HostVMInefficiencyFactor=%f\n", __func__, GPUVMEnable, HostVMEnable, HostVMInefficiencyFactor);
#endif
CalculateVupdateAndDynamicMetadataParameters(
MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
DynamicMetadataTransmittedBytes,
DynamicMetadataLinesBeforeActiveRequired,
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
&Tdmbf,
&Tdmec,
&Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
#ifdef __DML_VBA_ALLOW_DELTA__
if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
#else
if (DynamicMetadataVMEnabled == true) {
#endif
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
} else {
*Tdmdl = TWait + UrgentExtraLatency;
}
#ifdef __DML_VBA_ALLOW_DELTA__
if (DynamicMetadataEnable == false) {
*Tdmdl = 0.0;
}
#endif
if (DynamicMetadataEnable == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *Tdmdl);
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK + DSCDelay;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: DPPCLK: %f\n", __func__, myPipe->DPPCLK);
dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->DISPCLK);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: ODMCombineIsEnabled: %d\n", __func__, myPipe->ODMCombineIsEnabled);
#endif
*DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineIsEnabled) ? 18 : 0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
#endif
MyError = false;
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
#ifdef __DML_VBA_ALLOW_DELTA__
if (!myPipe->DCCEnable) {
Tr0_trips = 0.0;
Tr0_trips_rounded = 0.0;
}
#endif
if (!GPUVMEnable) {
Tvm_trips = 0.0;
Tvm_trips_rounded = 0.0;
}
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
} else {
*Tno_bw = 0;
}
} else if (!myPipe->DCCEnable) {
*Tno_bw = LineTime;
} else {
*Tno_bw = LineTime / 4;
}
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10 || myPipe->SourcePixelFormat == dm_420_12)
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
else
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
prefetch_bw_pr = bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane;
prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
#endif
#ifdef __DML_VBA_DEBUG__
dml_print("DML: HTotal: %d\n", myPipe->HTotal);
dml_print("DML: prefetch_bw_oto: %f\n", prefetch_bw_oto);
dml_print("DML: PrefetchSourceLinesY: %f\n", PrefetchSourceLinesY);
dml_print("DML: swath_width_luma_ub: %d\n", swath_width_luma_ub);
dml_print("DML: BytePerPixelY: %d\n", myPipe->BytePerPixelY);
dml_print("DML: Tsw_oto: %f\n", Tsw_oto);
#endif
if (GPUVMEnable == true)
Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto, Tvm_trips, LineTime / 4.0);
else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4((MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto, Tr0_trips, // PREVIOUS_ERROR (missing this term)
LineTime - Tvm_oto,
LineTime / 4);
} else {
Tr0_oto = (LineTime - Tvm_oto) / 2.0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
#endif
Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor, MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
if (prefetch_sw_bytes < dep_bytes)
prefetch_sw_bytes = 2 * dep_bytes;
dml_print("DML: dst_y_prefetch_oto: %f\n", dst_y_prefetch_oto);
dml_print("DML: Tvm_oto_lines: %f\n", Tvm_oto_lines);
dml_print("DML: Tr0_oto_lines: %f\n", Tr0_oto_lines);
dml_print("DML: Lsw_oto: %f\n", Lsw_oto);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: dst_y_prefetch_equ: %f (after round)\n", dst_y_prefetch_equ);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: Tvstartup: %fus - time between vstartup and first pixel of active\n", VStartup * LineTime);
dml_print("DML: TSetup: %fus - time from vstartup to vready\n", *TSetup);
dml_print("DML: TCalc: %fus - time for calculations in dchub starting at vready\n", TCalc);
dml_print("DML: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", TWait);
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
dml_print("DML: DSTXAfterScaler: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
dml_print("DML: DSTYAfterScaler: %f lines - number of lines of pipeline and buffer delay after scaler \n", *DSTYAfterScaler);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else {
PrefetchBandwidth1 = 0;
}
if (VStartup == MaxVStartup && Tsw_est1 / LineTime < min_Lsw && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else {
PrefetchBandwidth3 = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
#endif
if (VStartup == MaxVStartup && Tsw_est3 / LineTime < min_Lsw && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth4 = prefetch_sw_bytes / (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
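/*
 * Candidate prefetch bandwidths (derived from the guards above and the
 * checks below):
 *   PrefetchBandwidth1 - VM and row fetches both paced by bandwidth over
 *                        the whole Tpre window (minus Tno_bw).
 *   PrefetchBandwidth2 - row fetch time pinned at Tr0_trips_rounded, so the
 *                        row bytes drop out of the bandwidth-paced portion.
 *   PrefetchBandwidth3 - VM fetch time pinned at Tvm_trips_rounded.
 *   PrefetchBandwidth4 - both VM and row fetch times pinned at their trip
 *                        times; only the swath bytes are bandwidth paced.
 * The Case1/2/3 checks pick the first candidate whose implied Tvm/Tr0 are
 * consistent with the assumption used to compute it, falling back to
 * PrefetchBandwidth4 otherwise.
 */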
{
bool Case1OK;
bool Case2OK;
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1 >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
}
} else {
Case1OK = false;
}
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2 >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
}
} else {
Case2OK = false;
}
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 < Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
}
} else {
Case3OK = false;
}
if (Case1OK) {
prefetch_bw_equ = PrefetchBandwidth1;
} else if (Case2OK) {
prefetch_bw_equ = PrefetchBandwidth2;
} else if (Case3OK) {
prefetch_bw_equ = PrefetchBandwidth3;
} else {
prefetch_bw_equ = PrefetchBandwidth4;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
(LineTime - Tvm_equ) / 2,
LineTime / 4);
} else {
Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
Tvm_equ = 0;
Tr0_equ = 0;
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
}
}
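/*
 * Use whichever prefetch takes fewer destination lines: the "oto" values
 * computed earlier, or the "equ" values solved from the schedule equation
 * above. Tvm, Tr0 and the reported prefetch bandwidth follow the selected
 * variant.
 */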
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_ALLOW_DELTA__
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch
// See note above dated 5/30/2018
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n", __func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n", __func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
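/*
 * VRatioPrefetchY/C: effective vertical ratio during prefetch, i.e. the
 * source lines to fetch divided by the destination line times available,
 * clamped to at least 1. When the swath height and initial prefill are
 * large, the ratio is raised further so that all MaxNumSwath swaths can
 * still be fetched in the remaining line times.
 */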
if (LinesToRequestPrefetchPixelData > 0 && prefetch_bw_equ > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: SwathHeightY = %d\n", __func__, SwathHeightY);
dml_print("DML::%s: VInitPreFillY = %f\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY = dml_max(
(double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY / (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: MaxNumSwathY = %d\n", __func__, MaxNumSwathY);
#endif
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC);
dml_print("DML::%s: VInitPreFillC = %f\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4) || VInitPreFillC > 3) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC = dml_max(
*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC / (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: MaxNumSwathC = %d\n", __func__, MaxNumSwathC);
#endif
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n", __func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelC * swath_width_chroma_ub
/ LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
dml_print("DML: LinesToRequestPrefetchPixelData: %f, should be > 0\n", LinesToRequestPrefetchPixelData);
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
dml_print(
"DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
(double) LinesToRequestPrefetchPixelData * LineTime + 2.0 * TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print(
"DML: Tsw: %fus - time to fetch enough pixel data and cursor data to feed the scaler's init position and detile\n",
(double) LinesToRequestPrefetchPixelData * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
(*DSTYAfterScaler + ((double) (*DSTXAfterScaler) /
(double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n",
VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
{
double prefetch_vm_bw;
double prefetch_row_bw;
if (PDEAndMetaPTEBytesFrame == 0) {
prefetch_vm_bw = 0;
} else if (*DestinationLinesToRequestVMInVBlank > 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n", __func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
if (MetaRowByte + PixelPTEBytesPerRow == 0) {
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n", __func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
return MyError;
}
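/*
 * RoundToDFSGranularityUp/Down snap a requested clock onto what the
 * divider-based DFS can actually produce: frequencies of the form
 * (4 * VCOSpeed) / N for integer N, rounding to the next achievable value
 * at or above (Up) or at or below (Down) the request.
 * Illustrative numbers only: with VCOSpeed = 3600 MHz and Clock = 1000 MHz,
 * Up gives 14400 / floor(14.4) = 14400 / 14 ~= 1028.6 MHz, while
 * Down gives 14400 / ceil(14.4) = 14400 / 15 = 960 MHz.
 */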
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4.0 / Clock, 1);
}
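/*
 * CalculateDCCConfiguration: decide, per plane, whether DCC requests can be
 * issued as full 256-byte blocks or must drop to 128-byte (contiguous or
 * non-contiguous) blocks, and report the resulting maximum uncompressed and
 * compressed block sizes and independent block size for luma and chroma.
 * The decision depends on how many full swaths fit in the detile buffer and
 * on whether 128-byte segments are contiguous for the tiling format.
 */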
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
int yuv420;
int horz_div_l;
int horz_div_c;
int vert_div_l;
int vert_div_c;
int swath_buf_size;
double detile_buf_vp_horz_limit;
double detile_buf_vp_vert_limit;
int MAS_vp_horz_limit;
int MAS_vp_vert_limit;
int max_vp_horz_width;
int max_vp_vert_height;
int eff_surf_width_l;
int eff_surf_width_c;
int eff_surf_height_l;
int eff_surf_height_c;
int full_swath_bytes_horz_wc_l;
int full_swath_bytes_horz_wc_c;
int full_swath_bytes_vert_wc_l;
int full_swath_bytes_vert_wc_c;
int req128_horz_wc_l;
int req128_horz_wc_c;
int req128_vert_wc_l;
int req128_vert_wc_c;
int segment_order_horz_contiguous_luma;
int segment_order_horz_contiguous_chroma;
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
RequestType RequestChroma;
yuv420 = ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12) ? 1 : 0);
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
vert_div_c = 1;
if (BytePerPixelY == 1)
vert_div_l = 0;
if (BytePerPixelC == 1)
vert_div_c = 0;
if (BytePerPixelY == 8 && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t || TilingFormat == dm_sw_64kb_s_x))
horz_div_l = 0;
if (BytePerPixelC == 8 && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t || TilingFormat == dm_sw_64kb_s_x))
horz_div_c = 0;
if (BytePerPixelC == 0) {
swath_buf_size = DETBufferSize / 2 - 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size / ((double) RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l));
detile_buf_vp_vert_limit = (double) swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
} else {
swath_buf_size = DETBufferSize / 2 - 2 * 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size
/ ((double) RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l)
+ (double) RequestHeight256ByteChroma * BytePerPixelC / (1 + horz_div_c) / (1 + yuv420));
detile_buf_vp_vert_limit = (double) swath_buf_size
/ (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l) + 256.0 / RequestHeight256ByteChroma / (1 + vert_div_c) / (1 + yuv420));
}
if (SourcePixelFormat == dm_420_10) {
detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
}
detile_buf_vp_horz_limit = dml_floor(detile_buf_vp_horz_limit - 1, 16);
detile_buf_vp_vert_limit = dml_floor(detile_buf_vp_vert_limit - 1, 16);
MAS_vp_horz_limit = SourcePixelFormat == dm_rgbe_alpha ? 3840 : 5760;
MAS_vp_vert_limit = (BytePerPixelC > 0 ? 2880 : 5760);
max_vp_horz_width = dml_min((double) MAS_vp_horz_limit, detile_buf_vp_horz_limit);
max_vp_vert_height = dml_min((double) MAS_vp_vert_limit, detile_buf_vp_vert_limit);
eff_surf_width_l = (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
eff_surf_height_l = (SurfaceHeightLuma > max_vp_vert_height ? max_vp_vert_height : SurfaceHeightLuma);
eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
if (BytePerPixelC > 0) {
full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma * BytePerPixelC;
full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
} else {
full_swath_bytes_horz_wc_c = 0;
full_swath_bytes_vert_wc_c = 0;
}
if (SourcePixelFormat == dm_420_10) {
full_swath_bytes_horz_wc_l = dml_ceil(full_swath_bytes_horz_wc_l * 2 / 3, 256);
full_swath_bytes_horz_wc_c = dml_ceil(full_swath_bytes_horz_wc_c * 2 / 3, 256);
full_swath_bytes_vert_wc_l = dml_ceil(full_swath_bytes_vert_wc_l * 2 / 3, 256);
full_swath_bytes_vert_wc_c = dml_ceil(full_swath_bytes_vert_wc_c * 2 / 3, 256);
}
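/*
 * If two full swaths of every plane fit in the DET buffer, 256-byte
 * requests can be kept for that scan direction (req128 = 0). Otherwise
 * fall back to 128-byte requests, demoting only the chroma plane unless
 * the luma swath is at least 1.5x the chroma swath, and demoting both
 * planes when neither single demotion fits.
 */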
if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 0;
} else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c && 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 1;
} else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c && full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 1;
req128_horz_wc_c = 0;
} else {
req128_horz_wc_l = 1;
req128_horz_wc_c = 1;
}
if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 0;
} else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c && 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 1;
} else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c && full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 1;
req128_vert_wc_c = 0;
} else {
req128_vert_wc_l = 1;
req128_vert_wc_c = 1;
}
if (BytePerPixelY == 2 || (BytePerPixelY == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_luma = 0;
} else {
segment_order_horz_contiguous_luma = 1;
}
if ((BytePerPixelY == 8 && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x || TilingFormat == dm_sw_64kb_d_t || TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelY == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_luma = 0;
} else {
segment_order_vert_contiguous_luma = 1;
}
if (BytePerPixelC == 2 || (BytePerPixelC == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_chroma = 0;
} else {
segment_order_horz_contiguous_chroma = 1;
}
if ((BytePerPixelC == 8 && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x || TilingFormat == dm_sw_64kb_d_t || TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelC == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_chroma = 0;
} else {
segment_order_vert_contiguous_chroma = 1;
}
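/*
 * Map the req128/segment-contiguity results onto a request type per plane.
 * When the scan direction is unknown at DCC programming time, both the
 * horizontal and vertical results must be satisfied; otherwise only the
 * result for the actual scan direction is used. Requests classified as
 * 128-byte non-contiguous end up with the 64-byte compressed/independent
 * block settings programmed below.
 */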
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0) || (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0) || (req128_vert_wc_c == 1 && segment_order_vert_contiguous_chroma == 0)) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else if (ScanOrientation != dm_vert) {
if (req128_horz_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else {
if (req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
}
if (RequestLuma == REQ_256Bytes) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 256;
*IndependentBlockLuma = 0;
} else if (RequestLuma == REQ_128BytesContiguous) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 128;
*IndependentBlockLuma = 128;
} else {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 64;
*IndependentBlockLuma = 64;
}
if (RequestChroma == REQ_256Bytes) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 256;
*IndependentBlockChroma = 0;
} else if (RequestChroma == REQ_128BytesContiguous) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 128;
*IndependentBlockChroma = 128;
} else {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 64;
*IndependentBlockChroma = 64;
}
if (DCCEnabled != true || BytePerPixelC == 0) {
*MaxUncompressedBlockChroma = 0;
*MaxCompressedBlockChroma = 0;
*IndependentBlockChroma = 0;
}
if (DCCEnabled != true) {
*MaxUncompressedBlockLuma = 0;
*MaxCompressedBlockLuma = 0;
*IndependentBlockLuma = 0;
}
}
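/*
 * CalculatePrefetchSourceLines: number of source lines that must be fetched
 * ahead of scan-out, returned as MaxNumSwath * SwathHeight plus a partial
 * swath. VInitPreFill is the scaler's initial line fill (derived from
 * VRatio and the tap count) and MaxNumSwath the number of full swaths
 * needed to cover it.
 */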
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MaxPartialSwath;
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!v->IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2) % SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print("WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1) % SwathHeight;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatio = %f\n", __func__, VRatio);
dml_print("DML::%s: vtaps = %f\n", __func__, vtaps);
dml_print("DML::%s: VInitPreFill = %f\n", __func__, *VInitPreFill);
dml_print("DML::%s: ProgressiveToInterlaceUnitInOPP = %d\n", __func__, ProgressiveToInterlaceUnitInOPP);
dml_print("DML::%s: IgnoreViewportPositioning = %d\n", __func__, v->IgnoreViewportPositioning);
dml_print("DML::%s: SwathHeight = %d\n", __func__, SwathHeight);
dml_print("DML::%s: MaxPartialSwath = %d\n", __func__, MaxPartialSwath);
dml_print("DML::%s: MaxNumSwath = %d\n", __func__, *MaxNumSwath);
dml_print("DML::%s: Prefetch source lines = %d\n", __func__, *MaxNumSwath * SwathHeight + MaxPartialSwath);
#endif
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
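/*
 * CalculateVMAndRowBytes: returns the PDE + meta PTE bytes that must be
 * fetched once per frame and fills in the per-row quantities (meta row
 * bytes, pixel PTE bytes per row, dpte/meta row geometry and group sizes)
 * used by the prefetch and flip schedules. Host VM adds a multiplier of
 * (1 + 8 * HostVMDynamicLevels) to both the frame and row PTE totals.
 */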
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
unsigned int MacroTileHeight;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
unsigned int PixelPTEReqHeightPTEs = 0;
unsigned int HostVMDynamicLevels = 0;
double FractionOfPTEReturnDrop;
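/*
 * HostVMDynamicLevels: number of host VM page table levels that must be
 * walked dynamically. Host VM minimum page sizes of at least 2048 allow one
 * non-cached level to be skipped, and at least 1048576 allow two.
 */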
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048) {
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
} else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
} else {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
}
*MetaRequestHeight = 8 * BlockHeight256Bytes;
*MetaRequestWidth = 8 * BlockWidth256Bytes;
if (ScanDirection != dm_vert) {
*meta_row_height = *MetaRequestHeight;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth) + *MetaRequestWidth;
*MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = *MetaRequestWidth;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight) + *MetaRequestHeight;
*MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
}
DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes) * BytePerPixel / 256;
if (GPUVMEnable == true) {
*MetaPTEBytesFrame = (dml_ceil((double) (DCCMetaSurfaceBytes - 4.0 * 1024.0) / (8 * 4.0 * 1024), 1) + 1) * 64;
MPDEBytesFrame = 128 * (v->GPUVMMaxPageTableLevels - 1);
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
if (DCCEnable != true) {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
if (SurfaceTiling == dm_sw_linear) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && v->GPUVMMaxPageTableLevels > 1) {
if (ScanDirection != dm_vert) {
*DPDE0BytesFrame = 64
* (dml_ceil(
((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes)
/ (8 * 2097152),
1) + 1);
} else {
*DPDE0BytesFrame = 64
* (dml_ceil(
((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes)
/ (8 * 2097152),
1) + 1);
}
ExtraDPDEBytesFrame = 128 * (v->GPUVMMaxPageTableLevels - 2);
} else {
*DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame + ExtraDPDEBytesFrame;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaPTEBytesFrame = %d\n", __func__, *MetaPTEBytesFrame);
dml_print("DML::%s: MPDEBytesFrame = %d\n", __func__, MPDEBytesFrame);
dml_print("DML::%s: DPDE0BytesFrame = %d\n", __func__, *DPDE0BytesFrame);
dml_print("DML::%s: ExtraDPDEBytesFrame= %d\n", __func__, ExtraDPDEBytesFrame);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
#endif
if (HostVMEnable == true) {
PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * HostVMDynamicLevels);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
#endif
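/*
 * PTE request sizing: linear surfaces use single-line requests spanning
 * 32 KB of surface; 4 KB macro tiles use one 64-byte PTE request covering
 * 8 macro tiles; larger macro tiles with GPUVMMinPageSize == 4 use 16 PTEs
 * per 128-byte request. FractionOfPTEReturnDrop models returned PTEs that
 * cannot be used for the current scan direction.
 */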
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = 1;
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
if (ScanDirection != dm_vert)
FractionOfPTEReturnDrop = 0;
else
FractionOfPTEReturnDrop = 7.0 / 8.0; /* only 1 in 8 returned PTEs is usable for vertical scan */
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
*PixelPTEReqWidth = 16 * BlockWidth256Bytes;
*PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil((double)(Pitch * *dpte_row_height - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
*dpte_row_height = *PixelPTEReqHeight;
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else {
*dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
}
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop) <= 64 * PTEBufferSizeInRequests) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
if (GPUVMEnable != true) {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %i\n", *MetaPTEBytesFrame);
if (HostVMEnable == true) {
*PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * HostVMDynamicLevels);
}
if (HostVMEnable == true) {
*vm_group_bytes = 512;
*dpte_group_bytes = 512;
} else if (GPUVMEnable == true) {
*vm_group_bytes = 2048;
if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection == dm_vert) {
*dpte_group_bytes = 512;
} else {
*dpte_group_bytes = 2048;
}
} else {
*vm_group_bytes = 0;
*dpte_group_bytes = 0;
}
return PDEAndMetaPTEBytesFrame;
}
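/*
 * DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation:
 * top-level performance pass for the already-selected voltage level. It
 * computes DISPCLK/DPPCLK (including writeback and ODM-combine cases),
 * DCFCLK deep sleep, DSC clocks and delays, and per-plane VM/row byte
 * counts and prefetch source lines, then iterates over VStartup candidates
 * (starting at __DML_VBA_MIN_VSTARTUP__) looking for a prefetch schedule
 * that fits within the available return bandwidth.
 */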
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int j, k;
double HostVMInefficiencyFactor = 1.0;
bool NoChromaPlanes = true;
int ReorderBytes;
double VMDataOnlyReturnBW;
double MaxTotalRDBandwidth = 0;
int PrefetchMode = v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb];
v->WritebackDISPCLK = 0.0;
v->DISPCLKWithRamping = 0;
v->DISPCLKWithoutRamping = 0;
v->GlobalDPPCLK = 0.0;
/* DAL custom code: need to update ReturnBW in case min dcfclk is overridden */
{
double IdealFabricAndSDPPortBandwidthPerState = dml_min(
v->ReturnBusWidth * v->DCFCLKState[v->VoltageLevel][v->maxMpcComb],
v->FabricClockPerState[v->VoltageLevel] * v->FabricDatapathToDCNDataReturn);
double IdealDRAMBandwidthPerState = v->DRAMSpeedPerState[v->VoltageLevel] * v->NumberOfChannels * v->DRAMChannelWidth;
if (v->HostVMEnable != true) {
v->ReturnBW = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly / 100.0);
} else {
v->ReturnBW = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0);
}
}
/* End DAL custom code */
// DISPCLK and DPPCLK Calculation
//
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k]) {
v->WritebackDISPCLK = dml_max(
v->WritebackDISPCLK,
dml31_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->HRatio[k] > 1) {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1));
} else {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPLuma = v->PixelClock[k]
* dml_max(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
dml_max(v->HRatio[k] * v->VRatio[k] / v->PSCL_THROUGHPUT_LUMA[k], 1.0));
if ((v->htaps[k] > 6 || v->vtaps[k] > 6) && v->DPPCLKUsingSingleDPPLuma < 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPLuma = 2 * v->PixelClock[k];
}
if ((v->SourcePixelFormat[k] != dm_420_8 && v->SourcePixelFormat[k] != dm_420_10 && v->SourcePixelFormat[k] != dm_420_12
&& v->SourcePixelFormat[k] != dm_rgbe_alpha)) {
v->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
v->DPPCLKUsingSingleDPP[k] = v->DPPCLKUsingSingleDPPLuma;
} else {
if (v->HRatioChroma[k] > 1) {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPChroma = v->PixelClock[k]
* dml_max3(
v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_THROUGHPUT_CHROMA[k],
1.0);
if ((v->HTAPsChroma[k] > 6 || v->VTAPsChroma[k] > 6) && v->DPPCLKUsingSingleDPPChroma < 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPChroma = 2 * v->PixelClock[k];
}
v->DPPCLKUsingSingleDPP[k] = dml_max(v->DPPCLKUsingSingleDPPLuma, v->DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] != k)
continue;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1) {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) * (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
}
}
v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping, v->WritebackDISPCLK);
v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping, v->WritebackDISPCLK);
ASSERT(v->DISPCLKDPPCLKVCOSpeed != 0);
v->DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(v->DISPCLKWithRamping, v->DISPCLKDPPCLKVCOSpeed);
v->DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(v->DISPCLKWithoutRamping, v->DISPCLKDPPCLKVCOSpeed);
v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
v->soc.clock_limits[v->soc.num_states - 1].dispclk_mhz,
v->DISPCLKDPPCLKVCOSpeed);
if (v->DISPCLKWithoutRampingRoundedToDFSGranularity > v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated = v->DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (v->DISPCLKWithRampingRoundedToDFSGranularity > v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated = v->MaxDispclkRoundedToDFSGranularity;
} else {
v->DISPCLK_calculated = v->DISPCLKWithRampingRoundedToDFSGranularity;
}
v->DISPCLK = v->DISPCLK_calculated;
DTRACE(" dispclk_mhz (calculated) = %f", v->DISPCLK_calculated);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->DPPCLKUsingSingleDPP[k] / v->DPPPerPlane[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
v->GlobalDPPCLK = dml_max(v->GlobalDPPCLK, v->DPPCLK_calculated[k]);
}
v->GlobalDPPCLK = RoundToDFSGranularityUp(v->GlobalDPPCLK, v->DISPCLKDPPCLKVCOSpeed);
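/*
 * Each requested per-pipe DPPCLK is rounded up to the next multiple of
 * GlobalDPPCLK / 255, i.e. per-pipe DPPCLK is expressed in 1/255 steps of
 * the global DPPCLK (presumably matching how the ratio is programmed).
 */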
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->GlobalDPPCLK / 255 * dml_ceil(v->DPPCLK_calculated[k] * 255.0 / v->GlobalDPPCLK, 1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, v->DPPCLK_calculated[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK[k] = v->DPPCLK_calculated[k];
}
// Urgent and B P-State/DRAM Clock Change Watermark
DTRACE(" dcfclk_mhz = %f", v->DCFCLK);
DTRACE(" return_bus_bw = %f", v->ReturnBW);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelDETY[k],
&v->BytePerPixelDETC[k],
&v->BlockHeight256BytesY[k],
&v->BlockHeight256BytesC[k],
&v->BlockWidth256BytesY[k],
&v->BlockWidth256BytesC[k]);
}
CalculateSwathWidth(
false,
v->NumberOfActivePlanes,
v->SourcePixelFormat,
v->SourceScan,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->ODMCombineEnabled,
v->BytePerPixelY,
v->BytePerPixelC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
v->BlendingAndTiming,
v->HActive,
v->HRatio,
v->DPPPerPlane,
v->SwathWidthSingleDPPY,
v->SwathWidthSingleDPPC,
v->SwathWidthY,
v->SwathWidthC,
v->dummyinteger3,
v->dummyinteger4,
v->swath_width_luma_ub,
v->swath_width_chroma_ub);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->ReadBandwidthPlaneLuma[k] = v->SwathWidthSingleDPPY[k] * v->BytePerPixelY[k] / (v->HTotal[k] / v->PixelClock[k])
* v->VRatio[k];
v->ReadBandwidthPlaneChroma[k] = v->SwathWidthSingleDPPC[k] * v->BytePerPixelC[k] / (v->HTotal[k] / v->PixelClock[k])
* v->VRatioChroma[k];
DTRACE(" read_bw[%i] = %fBps", k, v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k]);
}
// DCFCLK Deep Sleep
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->ReturnBusWidth,
&v->DCFCLKDeepSleep);
// DSCCLK
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if ((v->BlendingAndTiming[k] != k) || !v->DSCEnabled[k]) {
v->DSCCLK_calculated[k] = 0.0;
} else {
if (v->OutputFormat[k] == dm_420)
v->DSCFormatFactor = 2;
else if (v->OutputFormat[k] == dm_444)
v->DSCFormatFactor = 1;
else if (v->OutputFormat[k] == dm_n422)
v->DSCFormatFactor = 2;
else
v->DSCFormatFactor = 1;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 12 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 6 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 3 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
}
// DSC Delay
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double BPP = v->OutputBpp[k];
if (v->DSCEnabled[k] && BPP != 0) {
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_disabled) {
v->DSCDelay[k] = dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DSCDelay[k] = 2
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 2.0,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
} else {
v->DSCDelay[k] = 4
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 4.0,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
}
v->DSCDelay[k] = v->DSCDelay[k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelay[k] = 0;
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && v->BlendingAndTiming[k] == j && v->DSCEnabled[j])
v->DSCDelay[k] = v->DSCDelay[j];
// Prefetch
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PixelPTEBytesPerRowY;
unsigned int MetaRowByteY;
unsigned int MetaRowByteC;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int PixelPTEBytesPerRowC;
bool PTEBufferSizeNotExceededY;
bool PTEBufferSizeNotExceededC;
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
|| v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
PDEAndMetaPTEBytesFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesC[k],
v->BlockWidth256BytesC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthC[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
v->DCCMetaPitchC[k],
&v->MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&PTEBufferSizeNotExceededC,
&v->dpte_row_width_chroma_ub[k],
&v->dpte_row_height_chroma[k],
&v->meta_req_width_chroma[k],
&v->meta_req_height_chroma[k],
&v->meta_row_width_chroma[k],
&v->meta_row_height_chroma[k],
&v->dummyinteger1,
&v->dummyinteger2,
&v->PixelPTEReqWidthC[k],
&v->PixelPTEReqHeightC[k],
&v->PTERequestSizeC[k],
&v->dpde0_bytes_per_frame_ub_c[k],
&v->meta_pte_bytes_per_frame_ub_c[k]);
v->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightC[k],
v->ViewportYStartC[k],
&v->VInitPreFillC[k],
&v->MaxNumSwathC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
v->MaxNumSwathC[k] = 0;
v->PrefetchSourceLinesC[k] = 0;
}
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesY[k],
v->BlockWidth256BytesY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthY[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&PTEBufferSizeNotExceededY,
&v->dpte_row_width_luma_ub[k],
&v->dpte_row_height[k],
&v->meta_req_width[k],
&v->meta_req_height[k],
&v->meta_row_width[k],
&v->meta_row_height[k],
&v->vm_group_bytes[k],
&v->dpte_group_bytes[k],
&v->PixelPTEReqWidthY[k],
&v->PixelPTEReqHeightY[k],
&v->PTERequestSizeY[k],
&v->dpde0_bytes_per_frame_ub_l[k],
&v->meta_pte_bytes_per_frame_ub_l[k]);
v->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightY[k],
v->ViewportYStartY[k],
&v->VInitPreFillY[k],
&v->MaxNumSwathY[k]);
v->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
v->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
v->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
MetaRowByteY,
MetaRowByteC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bw[k],
&v->dpte_row_bw[k]);
}
v->TotalDCCActiveDPP = 0;
v->TotalActiveDPP = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalActiveDPP = v->TotalActiveDPP + v->DPPPerPlane[k];
if (v->DCCEnable[k])
v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + v->DPPPerPlane[k];
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
|| v->SourcePixelFormat[k] == dm_rgbe_alpha)
NoChromaPlanes = false;
}
ReorderBytes = v->NumberOfChannels
* dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
VMDataOnlyReturnBW = dml_min(
dml_min(v->ReturnBusWidth * v->DCFCLK, v->FabricClock * v->FabricDatapathToDCNDataReturn)
* v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
v->DRAMSpeed * v->NumberOfChannels * v->DRAMChannelWidth
* v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly / 100.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: v->ReturnBusWidth = %f\n", __func__, v->ReturnBusWidth);
dml_print("DML::%s: v->DCFCLK = %f\n", __func__, v->DCFCLK);
dml_print("DML::%s: v->FabricClock = %f\n", __func__, v->FabricClock);
dml_print("DML::%s: v->FabricDatapathToDCNDataReturn = %f\n", __func__, v->FabricDatapathToDCNDataReturn);
dml_print("DML::%s: v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency = %f\n", __func__, v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency);
dml_print("DML::%s: v->DRAMSpeed = %f\n", __func__, v->DRAMSpeed);
dml_print("DML::%s: v->NumberOfChannels = %f\n", __func__, v->NumberOfChannels);
dml_print("DML::%s: v->DRAMChannelWidth = %f\n", __func__, v->DRAMChannelWidth);
dml_print("DML::%s: v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly = %f\n", __func__, v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly);
dml_print("DML::%s: VMDataOnlyReturnBW = %f\n", __func__, VMDataOnlyReturnBW);
dml_print("DML::%s: ReturnBW = %f\n", __func__, v->ReturnBW);
#endif
if (v->GPUVMEnable && v->HostVMEnable)
HostVMInefficiencyFactor = v->ReturnBW / VMDataOnlyReturnBW;
v->UrgentExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderBytes,
v->DCFCLK,
v->TotalActiveDPP,
v->PixelChunkSizeInKByte,
v->TotalDCCActiveDPP,
v->MetaChunkSize,
v->ReturnBW,
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->DPPPerPlane,
v->dpte_group_bytes,
HostVMInefficiencyFactor,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->TCalc = 24.0 / v->DCFCLKDeepSleep;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->DISPCLK;
} else
v->WritebackDelay[v->VoltageLevel][k] = 0;
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[j] == k && v->WritebackEnable[j] == true) {
v->WritebackDelay[v->VoltageLevel][k] = dml_max(
v->WritebackDelay[v->VoltageLevel][k],
v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[j],
v->WritebackHRatio[j],
v->WritebackVRatio[j],
v->WritebackVTaps[j],
v->WritebackDestinationWidth[j],
v->WritebackDestinationHeight[j],
v->WritebackSourceHeight[j],
v->HTotal[k]) / v->DISPCLK);
}
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j)
if (v->BlendingAndTiming[k] == j)
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackDelay[v->VoltageLevel][j];
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->MaxVStartupLines[k] =
(v->Interlace[k] && !v->ProgressiveToInterlaceUnitInOPP) ?
dml_floor((v->VTotal[k] - v->VActive[k]) / 2.0, 1.0) :
v->VTotal[k] - v->VActive[k]
- dml_max(
1.0,
dml_ceil(
(double) v->WritebackDelay[v->VoltageLevel][k]
/ (v->HTotal[k] / v->PixelClock[k]),
1));
if (v->MaxVStartupLines[k] > 1023)
v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d VoltageLevel = %d\n", __func__, k, v->VoltageLevel);
dml_print("DML::%s: k=%d WritebackDelay = %f\n", __func__, k, v->WritebackDelay[v->VoltageLevel][k]);
#endif
}
v->MaximumMaxVStartupLines = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k)
v->MaximumMaxVStartupLines = dml_max(v->MaximumMaxVStartupLines, v->MaxVStartupLines[k]);
// VBA_DELTA
// We don't really care to iterate between the various prefetch modes
//v->PrefetchERROR = CalculateMinAndMaxPrefetchMode(v->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &v->MinPrefetchMode, &v->MaxPrefetchMode);
v->UrgentLatency = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClock);
v->FractionOfUrgentBandwidth = 0.0;
v->FractionOfUrgentBandwidthImmediateFlip = 0.0;
v->VStartupLines = __DML_VBA_MIN_VSTARTUP__;
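/*
 * Prefetch schedule search: starting from the minimum VStartup, compute a
 * prefetch schedule for every active plane and accept it only if the total
 * read bandwidth (including urgent burst factors and prefetch bandwidth)
 * stays within ReturnBW and no plane violates the VRatio-prefetch or
 * urgent-latency-hiding limits.
 */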
do {
double MaxTotalRDBandwidthNoUrgentBurst = 0.0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
double TWait = CalculateTWait(PrefetchMode, v->DRAMClockChangeLatency, v->UrgentLatency, v->SREnterPlusExitTime);
MaxTotalRDBandwidth = 0;
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, v->VStartupLines);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
Pipe myPipe;
myPipe.DPPCLK = v->DPPCLK[k];
myPipe.DISPCLK = v->DISPCLK;
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep;
myPipe.DPPPerPlane = v->DPPPerPlane[k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.VRatio = v->VRatio[k];
myPipe.VRatioChroma = v->VRatioChroma[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
myPipe.BlockWidth256BytesC = v->BlockWidth256BytesC[k];
myPipe.BlockHeight256BytesC = v->BlockHeight256BytesC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineIsEnabled = v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1
|| v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1;
myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
myPipe.BytePerPixelY = v->BytePerPixelY[k];
myPipe.BytePerPixelC = v->BytePerPixelC[k];
myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
HostVMInefficiencyFactor,
&myPipe,
v->DSCDelay[k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->PrefetchSourceLinesY[k],
v->SwathWidthY[k],
v->VInitPreFillY[k],
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
&v->DestinationLinesToRequestRowInVBlank[k],
&v->VRatioPrefetchY[k],
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->TSetup[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Prefetch cal result=%0d\n", __func__, k, v->ErrorResult[k]);
#endif
v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[k]);
}
v->NoEnoughUrgentLatencyHiding = false;
v->NoEnoughUrgentLatencyHidingPre = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatioPrefetchY[k];
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgBurstFactorCursor[k],
&v->UrgBurstFactorLuma[k],
&v->UrgBurstFactorChroma[k],
&v->NoUrgentLatencyHiding[k]);
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPrefetchY[k],
v->VRatioPrefetchC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgBurstFactorCursorPre[k],
&v->UrgBurstFactorLumaPre[k],
&v->UrgBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
MaxTotalRDBandwidth = MaxTotalRDBandwidth
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthPlaneChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k]
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k]
* (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k]
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k])
+ v->cursor_bw_pre[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerPlane=%d\n", __func__, k, v->DPPPerPlane[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLuma=%f\n", __func__, k, v->UrgBurstFactorLuma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChroma=%f\n", __func__, k, v->UrgBurstFactorChroma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLumaPre=%f\n", __func__, k, v->UrgBurstFactorLumaPre[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChromaPre=%f\n", __func__, k, v->UrgBurstFactorChromaPre[k]);
dml_print("DML::%s: k=%0d VRatioPrefetchY=%f\n", __func__, k, v->VRatioPrefetchY[k]);
dml_print("DML::%s: k=%0d VRatioY=%f\n", __func__, k, v->VRatio[k]);
dml_print("DML::%s: k=%0d prefetch_vmrow_bw=%f\n", __func__, k, v->prefetch_vmrow_bw[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneLuma=%f\n", __func__, k, v->ReadBandwidthPlaneLuma[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneChroma=%f\n", __func__, k, v->ReadBandwidthPlaneChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw=%f\n", __func__, k, v->cursor_bw[k]);
dml_print("DML::%s: k=%0d meta_row_bw=%f\n", __func__, k, v->meta_row_bw[k]);
dml_print("DML::%s: k=%0d dpte_row_bw=%f\n", __func__, k, v->dpte_row_bw[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWLuma=%f\n", __func__, k, v->RequiredPrefetchPixDataBWLuma[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWChroma=%f\n", __func__, k, v->RequiredPrefetchPixDataBWChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw_pre=%f\n", __func__, k, v->cursor_bw_pre[k]);
dml_print("DML::%s: k=%0d MaxTotalRDBandwidthNoUrgentBurst=%f\n", __func__, k, MaxTotalRDBandwidthNoUrgentBurst);
#endif
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (v->VRatioPrefetchY[k] > 4 || v->VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
if (v->NoUrgentLatencyHiding[k] == true)
v->NoEnoughUrgentLatencyHiding = true;
if (v->NoUrgentLatencyHidingPre[k] == true)
v->NoEnoughUrgentLatencyHidingPre = true;
}
v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / v->ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MaxTotalRDBandwidthNoUrgentBurst=%f \n", __func__, MaxTotalRDBandwidthNoUrgentBurst);
dml_print("DML::%s: ReturnBW=%f \n", __func__, v->ReturnBW);
dml_print("DML::%s: FractionOfUrgentBandwidth=%f \n", __func__, v->FractionOfUrgentBandwidth);
#endif
if (MaxTotalRDBandwidth <= v->ReturnBW && v->NoEnoughUrgentLatencyHiding == 0 && v->NoEnoughUrgentLatencyHidingPre == 0
&& !VRatioPrefetchMoreThan4 && !DestinationLineTimesForPrefetchLessThan2)
v->PrefetchModeSupported = true;
else {
v->PrefetchModeSupported = false;
dml_print("DML::%s: ***failed***. Bandwidth violation. Results are NOT valid\n", __func__);
dml_print("DML::%s: MaxTotalRDBandwidth:%f AvailReturnBandwidth:%f\n", __func__, MaxTotalRDBandwidth, v->ReturnBW);
dml_print("DML::%s: VRatioPrefetch %s more than 4\n", __func__, (VRatioPrefetchMoreThan4) ? "is" : "is not");
dml_print("DML::%s: DestinationLines for Prefetch %s less than 2\n", __func__, (DestinationLineTimesForPrefetchLessThan2) ? "is" : "is not");
}
// PREVIOUS_ERROR
// This error-result check was historically done after PrefetchModeSupported was
// evaluated, so the flip schedule is still calculated even when prefetch mode is
// not supported.
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ErrorResult[k] == true || v->NotEnoughTimeForDynamicMetadata[k] == true) {
v->PrefetchModeSupported = false;
dml_print("DML::%s: ***failed***. Prefetch schedule violation. Results are NOT valid\n", __func__);
}
}
if (v->PrefetchModeSupported == true && v->ImmediateFlipSupport == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBW;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
- dml_max(
v->ReadBandwidthPlaneLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthPlaneChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k],
v->DPPPerPlane[k]
* (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
+ v->DPPPerPlane[k] * (v->PDEAndMetaPTEBytesFrame[k] + v->MetaRowByte[k] + v->PixelPTEBytesPerRow[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
v->total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k]
+ v->ReadBandwidthLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k],
v->DPPPerPlane[k]
* (v->final_flip_bw[k]
+ v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
v->total_dcn_read_bw_with_flip_no_urgent_burst = v->total_dcn_read_bw_with_flip_no_urgent_burst
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k] + v->ReadBandwidthPlaneLuma[k]
+ v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k],
v->DPPPerPlane[k]
* (v->final_flip_bw[k] + v->RequiredPrefetchPixDataBWLuma[k]
+ v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
}
v->FractionOfUrgentBandwidthImmediateFlip = v->total_dcn_read_bw_with_flip_no_urgent_burst / v->ReturnBW;
v->ImmediateFlipSupported = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBW) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: total_dcn_read_bw_with_flip %f (bw w/ flip too high!)\n", __func__, v->total_dcn_read_bw_with_flip);
#endif
v->ImmediateFlipSupported = false;
v->total_dcn_read_bw_with_flip = MaxTotalRDBandwidth;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Pipe %0d not supporting iflip\n",
__func__, k);
#endif
v->ImmediateFlipSupported = false;
}
}
} else {
v->ImmediateFlipSupported = false;
}
v->PrefetchAndImmediateFlipSupported =
(v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && !v->HostVMEnable
&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) ||
v->ImmediateFlipSupported)) ? true : false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PrefetchModeSupported %d\n", __func__, v->PrefetchModeSupported);
dml_print("DML::%s: ImmediateFlipRequirement[0] %d\n", __func__, v->ImmediateFlipRequirement[0] == dm_immediate_flip_required);
dml_print("DML::%s: ImmediateFlipSupported %d\n", __func__, v->ImmediateFlipSupported);
dml_print("DML::%s: ImmediateFlipSupport %d\n", __func__, v->ImmediateFlipSupport);
dml_print("DML::%s: HostVMEnable %d\n", __func__, v->HostVMEnable);
dml_print("DML::%s: PrefetchAndImmediateFlipSupported %d\n", __func__, v->PrefetchAndImmediateFlipSupported);
#endif
dml_print("DML::%s: Done loop: Vstartup=%d, Max Vstartup is %d\n", __func__, v->VStartupLines, v->MaximumMaxVStartupLines);
v->VStartupLines = v->VStartupLines + 1;
} while (!v->PrefetchAndImmediateFlipSupported && v->VStartupLines <= v->MaximumMaxVStartupLines);
ASSERT(v->PrefetchAndImmediateFlipSupported);
// Unbounded Request Enabled
CalculateUnboundedRequestAndCompressedBufferSize(
v->DETBufferSizeInKByte[0],
v->ConfigReturnBufferSizeInKByte,
v->UseUnboundedRequesting,
v->TotalActiveDPP,
NoChromaPlanes,
v->MaxNumDPP,
v->CompressedBufferSegmentSizeInkByte,
v->Output,
&v->UnboundedRequestEnabled,
&v->CompressedBufferSizeInkByte);
//Watermarks and NB P-State/DRAM Clock Change Support
{
enum clock_change_support DRAMClockChangeSupport; // dummy
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->DCFCLK,
v->ReturnBW,
v->UrgentLatency,
v->UrgentExtraLatency,
v->SOCCLK,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
&v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->WritebackAllowDRAMClockChangeEndPosition[k] = dml_max(
0,
v->VStartup[k] * v->HTotal[k] / v->PixelClock[k] - v->WritebackDRAMClockChangeWatermark);
} else {
v->WritebackAllowDRAMClockChangeEndPosition[k] = 0;
}
}
}
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
v->NumberOfActivePlanes,
v->VRatio,
v->VRatioChroma,
v->VRatioPrefetchY,
v->VRatioPrefetchC,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->BytePerPixelC,
v->SourceScan,
v->NumberOfCursors,
v->CursorWidth,
v->CursorBPP,
v->BlockWidth256BytesY,
v->BlockHeight256BytesY,
v->BlockWidth256BytesC,
v->BlockHeight256BytesC,
v->DisplayPipeLineDeliveryTimeLuma,
v->DisplayPipeLineDeliveryTimeChroma,
v->DisplayPipeLineDeliveryTimeLumaPrefetch,
v->DisplayPipeLineDeliveryTimeChromaPrefetch,
v->DisplayPipeRequestDeliveryTimeLuma,
v->DisplayPipeRequestDeliveryTimeChroma,
v->DisplayPipeRequestDeliveryTimeLumaPrefetch,
v->DisplayPipeRequestDeliveryTimeChromaPrefetch,
v->CursorRequestDeliveryTime,
v->CursorRequestDeliveryTimePrefetch);
CalculateMetaAndPTETimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->MetaChunkSize,
v->MinMetaChunkSizeBytes,
v->HTotal,
v->VRatio,
v->VRatioChroma,
v->DestinationLinesToRequestRowInVBlank,
v->DestinationLinesToRequestRowInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->BytePerPixelY,
v->BytePerPixelC,
v->SourceScan,
v->dpte_row_height,
v->dpte_row_height_chroma,
v->meta_row_width,
v->meta_row_width_chroma,
v->meta_row_height,
v->meta_row_height_chroma,
v->meta_req_width,
v->meta_req_width_chroma,
v->meta_req_height,
v->meta_req_height_chroma,
v->dpte_group_bytes,
v->PTERequestSizeY,
v->PTERequestSizeC,
v->PixelPTEReqWidthY,
v->PixelPTEReqHeightY,
v->PixelPTEReqWidthC,
v->PixelPTEReqHeightC,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->DST_Y_PER_PTE_ROW_NOM_L,
v->DST_Y_PER_PTE_ROW_NOM_C,
v->DST_Y_PER_META_ROW_NOM_L,
v->DST_Y_PER_META_ROW_NOM_C,
v->TimePerMetaChunkNominal,
v->TimePerChromaMetaChunkNominal,
v->TimePerMetaChunkVBlank,
v->TimePerChromaMetaChunkVBlank,
v->TimePerMetaChunkFlip,
v->TimePerChromaMetaChunkFlip,
v->time_per_pte_group_nom_luma,
v->time_per_pte_group_vblank_luma,
v->time_per_pte_group_flip_luma,
v->time_per_pte_group_nom_chroma,
v->time_per_pte_group_vblank_chroma,
v->time_per_pte_group_flip_chroma);
CalculateVMGroupAndRequestTimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->GPUVMMaxPageTableLevels,
v->HTotal,
v->BytePerPixelC,
v->DestinationLinesToRequestVMInVBlank,
v->DestinationLinesToRequestVMInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->vm_group_bytes,
v->dpde0_bytes_per_frame_ub_l,
v->dpde0_bytes_per_frame_ub_c,
v->meta_pte_bytes_per_frame_ub_l,
v->meta_pte_bytes_per_frame_ub_c,
v->TimePerVMGroupVBlank,
v->TimePerVMGroupFlip,
v->TimePerVMRequestVBlank,
v->TimePerVMRequestFlip);
// Min TTUVBlank
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (PrefetchMode == 0) {
v->AllowDRAMClockChangeDuringVBlank[k] = true;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(
v->DRAMClockChangeWatermark,
dml_max(v->StutterEnterPlusExitWatermark, v->UrgentWatermark));
} else if (PrefetchMode == 1) {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(v->StutterEnterPlusExitWatermark, v->UrgentWatermark);
} else {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = false;
v->MinTTUVBlank[k] = v->UrgentWatermark;
}
if (!v->DynamicMetadataEnable[k])
v->MinTTUVBlank[k] = v->TCalc + v->MinTTUVBlank[k];
}
// DCC Configuration
v->ActiveDPPs = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateDCCConfiguration(
v->DCCEnable[k],
false, /* DCCProgrammingAssumesScanDirectionUnknown: we should always know the direction */
v->SourcePixelFormat[k],
v->SurfaceWidthY[k],
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
v->DETBufferSizeInKByte[k] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->BytePerPixelC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->SourceScan[k],
&v->DCCYMaxUncompressedBlock[k],
&v->DCCCMaxUncompressedBlock[k],
&v->DCCYMaxCompressedBlock[k],
&v->DCCCMaxCompressedBlock[k],
&v->DCCYIndependentBlock[k],
&v->DCCCIndependentBlock[k]);
}
// VStartup Adjustment
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
bool isInterlaceTiming;
double Tvstartup_margin = (v->MaxVStartupLines[k] - v->VStartup[k]) * v->HTotal[k] / v->PixelClock[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, MinTTUVBlank = %f (before margin)\n", __func__, k, v->MinTTUVBlank[k]);
#endif
v->MinTTUVBlank[k] = v->MinTTUVBlank[k] + Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, Tvstartup_margin = %f\n", __func__, k, Tvstartup_margin);
dml_print("DML::%s: k=%d, MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, MinTTUVBlank = %f\n", __func__, k, v->MinTTUVBlank[k]);
#endif
v->Tdmdl[k] = v->Tdmdl[k] + Tvstartup_margin;
if (v->DynamicMetadataEnable[k] && v->DynamicMetadataVMEnabled) {
v->Tdmdl_vm[k] = v->Tdmdl_vm[k] + Tvstartup_margin;
}
isInterlaceTiming = (v->Interlace[k] && !v->ProgressiveToInterlaceUnitInOPP);
v->MIN_DST_Y_NEXT_START[k] = ((isInterlaceTiming ? dml_floor((v->VTotal[k] - v->VFrontPorch[k]) / 2.0, 1.0) : v->VTotal[k])
- v->VFrontPorch[k])
+ dml_max(1.0, dml_ceil(v->WritebackDelay[v->VoltageLevel][k] / (v->HTotal[k] / v->PixelClock[k]), 1.0))
+ dml_floor(4.0 * v->TSetup[k] / (v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
v->VStartup[k] = (isInterlaceTiming ? (2 * v->MaxVStartupLines[k]) : v->MaxVStartupLines[k]);
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
(int) (v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]))) {
v->VREADY_AT_OR_AFTER_VSYNC[k] = true;
} else {
v->VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, VStartup = %d (max)\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, VUpdateOffsetPix = %d\n", __func__, k, v->VUpdateOffsetPix[k]);
dml_print("DML::%s: k=%d, VUpdateWidthPix = %d\n", __func__, k, v->VUpdateWidthPix[k]);
dml_print("DML::%s: k=%d, VReadyOffsetPix = %d\n", __func__, k, v->VReadyOffsetPix[k]);
dml_print("DML::%s: k=%d, HTotal = %d\n", __func__, k, v->HTotal[k]);
dml_print("DML::%s: k=%d, VTotal = %d\n", __func__, k, v->VTotal[k]);
dml_print("DML::%s: k=%d, VActive = %d\n", __func__, k, v->VActive[k]);
dml_print("DML::%s: k=%d, VFrontPorch = %d\n", __func__, k, v->VFrontPorch[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, MIN_DST_Y_NEXT_START = %f\n", __func__, k, v->MIN_DST_Y_NEXT_START[k]);
dml_print("DML::%s: k=%d, VREADY_AT_OR_AFTER_VSYNC = %d\n", __func__, k, v->VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
{
//Maximum Bandwidth Used
double TotalWRBandwidth = 0;
double MaxPerPlaneVActiveWRBandwidth = 0;
double WRBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true && v->WritebackPixelFormat[k] == dm_444_32) {
WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 4;
} else if (v->WritebackEnable[k] == true) {
WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 8;
} else {
WRBandwidth = 0; /* reset so a plane without writeback does not re-add the previous plane's bandwidth */
}
TotalWRBandwidth = TotalWRBandwidth + WRBandwidth;
MaxPerPlaneVActiveWRBandwidth = dml_max(MaxPerPlaneVActiveWRBandwidth, WRBandwidth);
}
v->TotalDataReadBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalDataReadBandwidth = v->TotalDataReadBandwidth + v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k];
}
}
// Stutter Efficiency
CalculateStutterEfficiency(
mode_lib,
v->CompressedBufferSizeInkByte,
v->UnboundedRequestEnabled,
v->ConfigReturnBufferSizeInKByte,
v->MetaFIFOSizeInKEntries,
v->ZeroSizeBufferEntries,
v->NumberOfActivePlanes,
v->ROBBufferSizeInKByte,
v->TotalDataReadBandwidth,
v->DCFCLK,
v->ReturnBW,
v->COMPBUF_RESERVED_SPACE_64B,
v->COMPBUF_RESERVED_SPACE_ZS,
v->SRExitTime,
v->SRExitZ8Time,
v->SynchronizedVBlank,
v->StutterEnterPlusExitWatermark,
v->Z8StutterEnterPlusExitWatermark,
v->ProgressiveToInterlaceUnitInOPP,
v->Interlace,
v->MinTTUVBlank,
v->DPPPerPlane,
v->DETBufferSizeY,
v->BytePerPixelY,
v->BytePerPixelDETY,
v->SwathWidthY,
v->SwathHeightY,
v->SwathHeightC,
v->DCCRateLuma,
v->DCCRateChroma,
v->DCCFractionOfZeroSizeRequestsLuma,
v->DCCFractionOfZeroSizeRequestsChroma,
v->HTotal,
v->VTotal,
v->PixelClock,
v->VRatio,
v->SourceScan,
v->BlockHeight256BytesY,
v->BlockWidth256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesC,
v->DCCYMaxUncompressedBlock,
v->DCCCMaxUncompressedBlock,
v->VActive,
v->DCCEnable,
v->WritebackEnable,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->meta_row_bw,
v->dpte_row_bw,
&v->StutterEfficiencyNotIncludingVBlank,
&v->StutterEfficiency,
&v->NumberOfStutterBurstsPerFrame,
&v->Z8StutterEfficiencyNotIncludingVBlank,
&v->Z8StutterEfficiency,
&v->Z8NumberOfStutterBurstsPerFrame,
&v->StutterPeriod);
}
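/*
 * DisplayPipeConfiguration: recomputes per-plane byte-per-pixel values and
 * 256B request block sizes, then re-runs CalculateSwathAndDETConfiguration
 * with its leading boolean false (the mode-support pass later in this file
 * passes true) to produce the final SwathHeightY/C and DETBufferSizeY/C
 * split; outputs that are not needed here go to local dummy arrays.
 */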
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
// Display Pipe Configuration
double BytePerPixDETY[DC__NUM_DPP__MAX];
double BytePerPixDETC[DC__NUM_DPP__MAX];
int BytePerPixY[DC__NUM_DPP__MAX];
int BytePerPixC[DC__NUM_DPP__MAX];
int Read256BytesBlockHeightY[DC__NUM_DPP__MAX];
int Read256BytesBlockHeightC[DC__NUM_DPP__MAX];
int Read256BytesBlockWidthY[DC__NUM_DPP__MAX];
int Read256BytesBlockWidthC[DC__NUM_DPP__MAX];
double dummy1[DC__NUM_DPP__MAX];
double dummy2[DC__NUM_DPP__MAX];
double dummy3[DC__NUM_DPP__MAX];
double dummy4[DC__NUM_DPP__MAX];
int dummy5[DC__NUM_DPP__MAX];
int dummy6[DC__NUM_DPP__MAX];
bool dummy7[DC__NUM_DPP__MAX];
bool dummysinglestring;
unsigned int k;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&BytePerPixY[k],
&BytePerPixC[k],
&BytePerPixDETY[k],
&BytePerPixDETC[k],
&Read256BytesBlockHeightY[k],
&Read256BytesBlockHeightC[k],
&Read256BytesBlockWidthY[k],
&Read256BytesBlockWidthC[k]);
}
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
dummy1,
dummy2,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
v->ODMCombineEnabled,
v->BlendingAndTiming,
BytePerPixY,
BytePerPixC,
BytePerPixDETY,
BytePerPixDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->DPPPerPlane,
dummy5,
dummy6,
dummy3,
dummy4,
v->SwathHeightY,
v->SwathHeightC,
v->DETBufferSizeY,
v->DETBufferSizeC,
dummy7,
&dummysinglestring);
}
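/*
 * CalculateTWait: worst-case wait time the prefetch schedule must absorb for
 * the given prefetch mode. Mode 0 covers the larger of (DRAM clock change
 * latency + urgent latency) and max(self-refresh enter+exit time, urgent
 * latency); mode 1 covers max(self-refresh enter+exit time, urgent latency);
 * mode 2 covers urgent latency only.
 */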
static double CalculateTWait(unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatency, double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(DRAMClockChangeLatency + UrgentLatency, dml_max(SREnterPlusExitTime, UrgentLatency));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatency);
} else {
return UrgentLatency;
}
}
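/*
 * dml31_CalculateWriteBackDISPCLK: minimum DISPCLK required by the writeback
 * path, taken as the maximum of three bounds: a horizontal-taps/ratio bound,
 * a vertical-taps bound relative to HTotal, and a line-buffer bound.
 */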
double dml31_CalculateWriteBackDISPCLK(
enum source_format_class WritebackPixelFormat,
double PixelClock,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackHTaps,
unsigned int WritebackVTaps,
long WritebackSourceWidth,
long WritebackDestinationWidth,
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
DISPCLK_V = PixelClock * (WritebackVTaps * dml_ceil(WritebackDestinationWidth / 6.0, 1) + 8.0) / HTotal;
DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / WritebackSourceWidth;
return dml_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
}
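/*
 * CalculateWriteBackDelay: pixel-time delay from the end of the writeback
 * source to the last output line. Returns 0 when the last unclamped output
 * line lands before the source ends; otherwise the remaining output lines
 * times the scaler line length, plus the horizontal blank term and a fixed
 * 80-pixel margin.
 */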
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
int WritebackDestinationWidth,
int WritebackDestinationHeight,
int WritebackSourceHeight,
unsigned int HTotal)
{
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
double WritebackVInit;
WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
Line_length = dml_max((double) WritebackDestinationWidth, dml_ceil(WritebackDestinationWidth / 6.0, 1) * WritebackVTaps);
Output_lines_last_notclamped = WritebackDestinationHeight - 1 - dml_ceil((WritebackSourceHeight - WritebackVInit) / WritebackVRatio, 1);
if (Output_lines_last_notclamped < 0) {
CalculateWriteBackDelay = 0;
} else {
CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
}
return CalculateWriteBackDelay;
}
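/*
 * CalculateVupdateAndDynamicMetadataParameters: derives the VUPDATE offset,
 * VUPDATE width and VREADY offset (in pixels) from DCFCLK deep-sleep, DPPCLK,
 * DISPCLK and the inter-tile repeater delay, then the TSetup window and the
 * dynamic-metadata budgets Tdmbf, Tdmec and Tdmsks (Tdmsks is halved for
 * interlaced timings when the progressive-to-interlace unit is not in OPP).
 */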
static void CalculateVupdateAndDynamicMetadataParameters(
int MaxInterDCNTileRepeaters,
double DPPCLK,
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
int HTotal,
int VBlank,
int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *TSetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
double TotalRepeaterDelayTime;
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / DPPCLK + 3 / DISPCLK);
*VUpdateWidthPix = dml_ceil((14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime) * PixelClock, 1.0);
*VReadyOffsetPix = dml_ceil(dml_max(150.0 / DPPCLK, TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK) * PixelClock, 1.0);
*VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
*TSetup = (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
*Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
*Tdmec = HTotal / PixelClock;
if (DynamicMetadataLinesBeforeActiveRequired == 0) {
*Tdmsks = VBlank * HTotal / PixelClock / 2.0;
} else {
*Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
}
if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VUpdateWidthPix = %d\n", __func__, *VUpdateWidthPix);
dml_print("DML::%s: VReadyOffsetPix = %d\n", __func__, *VReadyOffsetPix);
dml_print("DML::%s: VUpdateOffsetPix = %d\n", __func__, *VUpdateOffsetPix);
#endif
}
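/*
 * CalculateRowBandwidth: average bandwidth needed to keep meta rows and PTE
 * rows fetched, expressed per line time and scaled by VRatio. Chroma terms
 * are added only for 4:2:0 and rgbe_alpha formats; the meta term is zero
 * without DCC and the PTE term is zero without GPUVM.
 */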
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime) + VRatioChroma * MetaRowByteChroma / (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatioChroma * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
}
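/*
 * CalculateFlipSchedule: per-plane immediate-flip schedule. The available
 * flip bandwidth is apportioned by this plane's share of TotImmediateFlipBytes,
 * the VM and row fetch times are bounded below by a quarter line time, and
 * final_flip_bw follows from the resulting line counts. The flip is marked
 * unsupported when the VM/row line counts reach 32/16 or the fetches do not
 * fit inside the minimum row time.
 */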
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow)
{
struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
LineTime / 4);
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
if (v->GPUVMEnable == true) {
v->final_flip_bw[k] = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
} else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
v->final_flip_bw[k] = 0;
}
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
v->dpte_row_height[k] * LineTime / v->VRatio[k],
v->meta_row_height[k] * LineTime / v->VRatio[k],
v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
v->ImmediateFlipSupportedForPipe[k] = false;
} else {
v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
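/*
 * TruncToValidBPP: clamps the output bits-per-pixel to what the link can
 * carry. MaxLinkBPP is derived from link rate, lane count and pixel clock
 * (with DSC overhead and ODM-combine caps applied); with DesiredBPP == 0 the
 * routine picks the best legal non-DSC bpp or rounds the DSC bpp down to
 * 1/16 granularity, otherwise it validates the caller-forced value. Returns
 * BPP_INVALID when no legal value fits.
 */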
static double TruncToValidBPP(
double LinkBitRate,
int Lanes,
int HTotal,
int HActive,
double PixelClock,
double DesiredBPP,
bool DSCEnable,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent,
int DSCSlices,
int AudioRate,
int AudioLayout,
enum odm_combine_mode ODMCombine)
{
double MaxLinkBPP;
int MinDSCBPP;
double MaxDSCBPP;
int NonDSCBPP0;
int NonDSCBPP1;
int NonDSCBPP2;
if (Format == dm_420) {
NonDSCBPP0 = 12;
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16; /* 1.0 keeps the 1/16 term from truncating to zero via integer division */
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
NonDSCBPP0 = 16;
NonDSCBPP1 = 20;
NonDSCBPP2 = 24;
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
} else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
}
if (DSCEnable && Output == dm_dp) {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock * (1 - 2.4 / 100);
} else {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock;
}
if (ODMCombine == dm_odm_combine_mode_4to1 && MaxLinkBPP > 16) {
MaxLinkBPP = 16;
} else if (ODMCombine == dm_odm_combine_mode_2to1 && MaxLinkBPP > 32) {
MaxLinkBPP = 32;
}
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
return BPP_INVALID;
} else if (MaxLinkBPP >= MaxDSCBPP) {
return MaxDSCBPP;
} else {
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
}
} else {
if (MaxLinkBPP >= NonDSCBPP2) {
return NonDSCBPP2;
} else if (MaxLinkBPP >= NonDSCBPP1) {
return NonDSCBPP1;
} else if (MaxLinkBPP >= NonDSCBPP0) {
return 16.0;
} else {
return BPP_INVALID;
}
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP <= NonDSCBPP0))
|| (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
return BPP_INVALID;
}
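/*
 * CalculatePrefetchSchedulePerPlane: fills a local Pipe descriptor from the
 * per-state/per-plane VBA variables and runs CalculatePrefetchSchedule for
 * voltage state i, clock branch j and plane k, recording in NoTimeForPrefetch
 * (and NoTimeForDynamicMetadata) whether the schedule fits.
 */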
static noinline void CalculatePrefetchSchedulePerPlane(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
int i,
unsigned j,
unsigned k)
{
struct vba_vars_st *v = &mode_lib->vba;
Pipe myPipe;
myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.VRatio = mode_lib->vba.VRatio[k];
myPipe.VRatioChroma = mode_lib->vba.VRatioChroma[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
|| v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
myPipe.BytePerPixelY = v->BytePerPixelY[k];
myPipe.BytePerPixelC = v->BytePerPixelC[k];
myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
HostVMInefficiencyFactor,
&myPipe,
v->DSCDelayPerState[i][k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->PrefetchLinesY[i][j][k],
v->SwathWidthYThisState[k],
v->PrefillY[k],
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
&v->LinesForMetaAndDPTERow[k],
&v->VRatioPreY[i][j][k],
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->dummy7[k],
&v->dummy8[k],
&v->dummy13[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
}
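/*
 * PatchDETBufferSizeInKByte (DCN3.15): splits the return buffer, minus the
 * minimum compressed-buffer reservation, evenly across all active pipes in
 * 64 KB steps, caps the result at DCN3_15_MAX_DET_SIZE, and applies the same
 * DET size to every plane.
 */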
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
{
int i, total_pipes = 0;
for (i = 0; i < NumberOfActivePlanes; i++)
total_pipes += NoOfDPPThisState[i];
DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
for (i = 1; i < NumberOfActivePlanes; i++)
DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
}
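/*
 * dml31_ModeSupportAndSystemConfigurationFull: top-level mode-support sweep.
 * For every SoC voltage state and both DPPCLK branches it evaluates scale
 * ratio/taps limits, source format and scan support, read and writeback
 * bandwidth, DISPCLK/DPPCLK and available-pipe limits, ODM/MPC combine
 * choices, and DSC/link output bpp, recording per-state support flags.
 */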
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
int i, j;
unsigned int k, m;
int ReorderingBytes;
int MinPrefetchMode = 0, MaxPrefetchMode = 2;
bool NoChroma = true;
bool EnoughWritebackUnits = true;
bool P2IWith420 = false;
bool DSCOnlyIfNecessaryWithBPP = false;
bool DSC422NativeNotSupported = false;
double MaxTotalVActiveRDBandwidth;
bool ViewportExceedsSurface = false;
bool FMTBufferExceeded = false;
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
/*Scale Ratio, taps Support Check*/
v->ScaleRatioAndTapsSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ScalerEnabled[k] == false
&& ((v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8 && v->SourcePixelFormat[k] != dm_rgbe
&& v->SourcePixelFormat[k] != dm_rgbe_alpha) || v->HRatio[k] != 1.0 || v->htaps[k] != 1.0
|| v->VRatio[k] != 1.0 || v->vtaps[k] != 1.0)) {
v->ScaleRatioAndTapsSupport = false;
} else if (v->vtaps[k] < 1.0 || v->vtaps[k] > 8.0 || v->htaps[k] < 1.0 || v->htaps[k] > 8.0
|| (v->htaps[k] > 1.0 && (v->htaps[k] % 2) == 1) || v->HRatio[k] > v->MaxHSCLRatio
|| v->VRatio[k] > v->MaxVSCLRatio || v->HRatio[k] > v->htaps[k]
|| v->VRatio[k] > v->vtaps[k]
|| (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8 && v->SourcePixelFormat[k] != dm_rgbe
&& (v->VTAPsChroma[k] < 1 || v->VTAPsChroma[k] > 8 || v->HTAPsChroma[k] < 1
|| v->HTAPsChroma[k] > 8 || (v->HTAPsChroma[k] > 1 && v->HTAPsChroma[k] % 2 == 1)
|| v->HRatioChroma[k] > v->MaxHSCLRatio
|| v->VRatioChroma[k] > v->MaxVSCLRatio
|| v->HRatioChroma[k] > v->HTAPsChroma[k]
|| v->VRatioChroma[k] > v->VTAPsChroma[k]))) {
v->ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if ((v->SurfaceTiling[k] == dm_sw_linear && (v->SourceScan[k] == dm_vert || v->DCCEnable[k] == true))
|| ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
|| v->SurfaceTiling[k] == dm_sw_64kb_d_x) && v->SourcePixelFormat[k] != dm_444_64)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
for (k = 0; k < v->NumberOfActivePlanes; k++) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelInDETY[k],
&v->BytePerPixelInDETC[k],
&v->Read256BlockHeightY[k],
&v->Read256BlockHeightC[k],
&v->Read256BlockWidthY[k],
&v->Read256BlockWidthC[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->SourceScan[k] != dm_vert) {
v->SwathWidthYSingleDPP[k] = v->ViewportWidth[k];
v->SwathWidthCSingleDPP[k] = v->ViewportWidthChroma[k];
} else {
v->SwathWidthYSingleDPP[k] = v->ViewportHeight[k];
v->SwathWidthCSingleDPP[k] = v->ViewportHeightChroma[k];
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->ReadBandwidthLuma[k] = v->SwathWidthYSingleDPP[k] * dml_ceil(v->BytePerPixelInDETY[k], 1.0)
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->ReadBandwidthChroma[k] = v->SwathWidthYSingleDPP[k] / 2 * dml_ceil(v->BytePerPixelInDETC[k], 2.0)
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k] / 2.0;
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true && v->WritebackPixelFormat[k] == dm_444_64) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 8.0;
} else if (v->WritebackEnable[k] == true) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4.0;
} else {
v->WriteBandwidth[k] = 0.0;
}
}
/*Writeback Latency support check*/
v->WritebackLatencySupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true && (v->WriteBandwidth[k] > v->WritebackInterfaceBufferSize * 1024 / v->WritebackLatency)) {
v->WritebackLatencySupport = false;
}
}
/*Writeback Mode Support Check*/
v->TotalNumberOfActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
v->TotalNumberOfActiveWriteback = v->TotalNumberOfActiveWriteback + 1;
}
}
if (v->TotalNumberOfActiveWriteback > v->MaxNumWriteback) {
EnoughWritebackUnits = false;
}
/*Writeback Scale Ratio and Taps Support Check*/
v->WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
if (v->WritebackHRatio[k] > v->WritebackMaxHSCLRatio || v->WritebackVRatio[k] > v->WritebackMaxVSCLRatio
|| v->WritebackHRatio[k] < v->WritebackMinHSCLRatio
|| v->WritebackVRatio[k] < v->WritebackMinVSCLRatio
|| v->WritebackHTaps[k] > v->WritebackMaxHSCLTaps
|| v->WritebackVTaps[k] > v->WritebackMaxVSCLTaps
|| v->WritebackHRatio[k] > v->WritebackHTaps[k] || v->WritebackVRatio[k] > v->WritebackVTaps[k]
|| (v->WritebackHTaps[k] > 2.0 && ((v->WritebackHTaps[k] % 2) == 1))) {
v->WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * v->WritebackDestinationWidth[k] * (v->WritebackVTaps[k] - 1) * 57 > v->WritebackLineBufferSize) {
v->WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
v->WritebackRequiredDISPCLK = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
v->WritebackRequiredDISPCLK = dml_max(
v->WritebackRequiredDISPCLK,
dml31_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->HRatio[k] > 1.0) {
v->PSCL_FACTOR[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
if (v->BytePerPixelC[k] == 0.0) {
v->PSCL_FACTOR_CHROMA[k] = 0.0;
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
* dml_max3(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0) && v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
} else {
if (v->HRatioChroma[k] > 1.0) {
v->PSCL_FACTOR_CHROMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
* dml_max5(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_FACTOR_CHROMA[k],
1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0 || v->HTAPsChroma[k] > 6.0 || v->VTAPsChroma[k] > 6.0)
&& v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
int MaximumSwathWidthSupportLuma;
int MaximumSwathWidthSupportChroma;
if (v->SurfaceTiling[k] == dm_sw_linear) {
MaximumSwathWidthSupportLuma = 8192.0;
} else if (v->SourceScan[k] == dm_vert && v->BytePerPixelC[k] > 0) {
MaximumSwathWidthSupportLuma = 2880.0;
} else if (v->SourcePixelFormat[k] == dm_rgbe_alpha) {
MaximumSwathWidthSupportLuma = 3840.0;
} else {
MaximumSwathWidthSupportLuma = 5760.0;
}
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma / 2.0;
} else {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma;
}
v->MaximumSwathWidthInLineBufferLuma = v->LineBufferSize * dml_max(v->HRatio[k], 1.0) / v->LBBitPerPixel[k]
/ (v->vtaps[k] + dml_max(dml_ceil(v->VRatio[k], 1.0) - 2, 0.0));
if (v->BytePerPixelC[k] == 0.0) {
v->MaximumSwathWidthInLineBufferChroma = 0;
} else {
v->MaximumSwathWidthInLineBufferChroma = v->LineBufferSize * dml_max(v->HRatioChroma[k], 1.0) / v->LBBitPerPixel[k]
/ (v->VTAPsChroma[k] + dml_max(dml_ceil(v->VRatioChroma[k], 1.0) - 2, 0.0));
}
v->MaximumSwathWidthLuma[k] = dml_min(MaximumSwathWidthSupportLuma, v->MaximumSwathWidthInLineBufferLuma);
v->MaximumSwathWidthChroma[k] = dml_min(MaximumSwathWidthSupportChroma, v->MaximumSwathWidthInLineBufferChroma);
}
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->odm_combine_dummy,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->NoOfDPPThisState,
v->swath_width_luma_ub_this_state,
v->swath_width_chroma_ub_this_state,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
if (v->ODMCombinePolicy == dm_odm_combine_policy_none
|| !(v->Output[k] == dm_dp ||
v->Output[k] == dm_dp2p0 ||
v->Output[k] == dm_edp)) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH)
FMTBufferExceeded = true;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_2to1) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_4to1
|| v->PlaneRequiredDISPCLKWithODMCombine2To1 > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
}
if (v->DSCEnabled[k] && v->HActive[k] > DCN31_MAX_DSC_IMAGE_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN31_MAX_DSC_IMAGE_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->Output[k] == dm_hdmi) {
FMTBufferExceeded = true;
} else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
if (v->HActive[k] / 4 > DCN31_MAX_FMT_420_BUFFER_WIDTH)
FMTBufferExceeded = true;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 4;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 4;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2;
} else if ((v->WhenToDoMPCCombine == dm_mpc_never
|| (v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
<= v->MaxDppclkRoundedDownToDFSGranularity && v->SingleDPPViewportSizeSupportPerPlane[k] == true))) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity)
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
if (v->NoOfDPP[i][j][k] == 1)
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] + 1;
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10
|| v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha)
NoChroma = false;
}
if (j == 1 && v->WhenToDoMPCCombine != dm_mpc_never
&& !UnboundedRequest(v->UseUnboundedRequesting, v->TotalNumberOfActiveDPP[i][j], NoChroma, v->Output[0])) {
while (!(v->TotalNumberOfActiveDPP[i][j] >= v->MaxNumDPP || v->TotalNumberOfSingleDPPPlanes[i][j] == 0)) {
double BWOfNonSplitPlaneOfMaximumBandwidth;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
BWOfNonSplitPlaneOfMaximumBandwidth = 0;
NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k] > BWOfNonSplitPlaneOfMaximumBandwidth
&& v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled && v->MPCCombine[i][j][k] == false) {
BWOfNonSplitPlaneOfMaximumBandwidth = v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
v->MPCCombine[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = true;
v->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
v->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
v->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + 1;
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] - 1;
}
}
if (v->TotalNumberOfActiveDPP[i][j] > v->MaxNumDPP) {
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (v->SingleDPPViewportSizeSupportPerPlane[k] == false && v->WhenToDoMPCCombine != dm_mpc_never) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k]
* (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
} else {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k]
* (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
if (!(v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
} else {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity)
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->WritebackRequiredDISPCLK);
if (v->MaxDispclkRoundedDownToDFSGranularity < v->WritebackRequiredDISPCLK) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
} else {
v->TotalAvailablePipesSupport[i][j] = false;
}
}
}
/*Display IO and DSC Support Check*/
v->NonsupportedDSCInputBPC = false;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (!(v->DSCInputBitPerComponent[k] == 12.0 || v->DSCInputBitPerComponent[k] == 10.0 || v->DSCInputBitPerComponent[k] == 8.0)
|| v->DSCInputBitPerComponent[k] > v->MaximumDSCBitsPerComponent) {
v->NonsupportedDSCInputBPC = true;
}
}
/*Number Of DSC Slices*/
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->PixelClockBackEnd[k] > 3200) {
v->NumberOfDSCSlices[k] = dml_ceil(v->PixelClockBackEnd[k] / 400.0, 4.0);
} else if (v->PixelClockBackEnd[k] > 1360) {
v->NumberOfDSCSlices[k] = 8;
} else if (v->PixelClockBackEnd[k] > 680) {
v->NumberOfDSCSlices[k] = 4;
} else if (v->PixelClockBackEnd[k] > 340) {
v->NumberOfDSCSlices[k] = 2;
} else {
v->NumberOfDSCSlices[k] = 1;
}
} else {
v->NumberOfDSCSlices[k] = 0;
}
}
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
if (v->BlendingAndTiming[k] == k) {
if (v->Output[k] == dm_hdmi) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
v->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, v->PHYCLKPerState[i]) * 10,
3,
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
false,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
}
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
if (v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
}
}
if (v->Output[k] == dm_dp2p0) {
v->Outbpp = BPP_INVALID;
if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 10000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 10000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
}
if (v->Outbpp == BPP_INVALID &&
(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 13500,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 13500,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
}
if (v->Outbpp == BPP_INVALID &&
(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 20000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 20000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
}
} else {
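/* DP1.x / eDP: fall back through HBR, HBR2 and HBR3 based on the PHY clock
 * (270/540/810 MHz), keeping the first rate that yields a valid output bpp.
 */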
v->Outbpp = BPP_INVALID;
if (v->PHYCLKPerState[i] >= 270.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 2700,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 5400,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 8100,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
}
}
}
} else {
v->OutputBppPerState[i][k] = 0;
}
}
}
for (i = 0; i < v->soc.num_states; i++) {
v->LinkCapacitySupport[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->BlendingAndTiming[k] == k
&& (v->Output[k] == dm_dp ||
v->Output[k] == dm_edp ||
v->Output[k] == dm_hdmi) && v->OutputBppPerState[i][k] == 0) {
v->LinkCapacitySupport[i] = false;
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k
&& (v->Output[k] == dm_dp ||
v->Output[k] == dm_edp ||
v->Output[k] == dm_hdmi)) {
if (v->OutputFormat[k] == dm_420 && v->Interlace[k] == 1 && v->ProgressiveToInterlaceUnitInOPP == true) {
P2IWith420 = true;
}
if (v->DSCEnable[k] == true && v->OutputFormat[k] == dm_n422
&& !v->DSC422NativeSupport) {
DSC422NativeNotSupported = true;
}
}
}
for (i = 0; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
&& (v->ODMCombine4To1Supported == false || v->Output[k] == dm_dp || v->Output[k] == dm_edp
|| v->Output[k] == dm_hdmi)) {
v->ODMCombine4To1SupportCheckOK[i] = false;
}
}
}
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
for (i = 0; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->RequiresDSC[i][k] == true) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 4.0;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 2.0;
} else {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 1.0;
}
}
}
if (v->TotalDSCUnitsRequired > v->NumberOfDSC) {
v->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
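/* The DSC delay is the per-slice compressor delay (dscceComputeDelay) plus
 * the fixed DSC pipeline delay (dscComputeDelay), multiplied by the ODM
 * combine factor (x2 or x4) and rescaled from the back-end to the front-end
 * pixel clock domain.
 */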
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
} else {
v->BPP = v->OutputBppPerState[i][k];
}
if (v->RequiresDSC[i][k] == true && v->BPP != 0.0) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
v->DSCDelayPerState[i][k] = dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->DSCDelayPerState[i][k] = 2.0
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 2,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
} else {
v->DSCDelayPerState[i][k] = 4.0
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 4,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
}
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[k] == m && v->RequiresDSC[i][m] == true) {
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][m];
}
}
}
}
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
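// For each voltage state and MPC combine option, recompute the swath and DET
// split with the DPP count chosen for that option, then cache the per-plane
// results into the *AllStates arrays for the bandwidth checks below.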
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->ODMCombineEnableThisState,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->NoOfDPPThisState,
v->swath_width_luma_ub_this_state,
v->swath_width_chroma_ub_this_state,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->dummystring,
&v->ViewportSizeSupport[i][j]);
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_FACTOR,
v->PSCL_FACTOR_CHROMA,
v->RequiredDPPCLKThisState,
v->ReadBandwidthLuma,
v->ReadBandwidthChroma,
v->ReturnBusWidth,
&v->ProjectedDCFCLKDeepSleep[i][j]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->swath_width_luma_ub_all_states[i][j][k] = v->swath_width_luma_ub_this_state[k];
v->swath_width_chroma_ub_all_states[i][j][k] = v->swath_width_chroma_ub_this_state[k];
v->SwathWidthYAllStates[i][j][k] = v->SwathWidthYThisState[k];
v->SwathWidthCAllStates[i][j][k] = v->SwathWidthCThisState[k];
v->SwathHeightYAllStates[i][j][k] = v->SwathHeightYThisState[k];
v->SwathHeightCAllStates[i][j][k] = v->SwathHeightCThisState[k];
v->DETBufferSizeYAllStates[i][j][k] = v->DETBufferSizeYThisState[k];
v->DETBufferSizeCAllStates[i][j][k] = v->DETBufferSizeCThisState[k];
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX];
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
}
v->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->DCCEnable[k] == true) {
v->TotalNumberOfDCCActiveDPP[i][j] = v->TotalNumberOfDCCActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10
|| v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12)
&& v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma)
/ 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
v->PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightC[k],
v->Read256BlockWidthC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthCThisState[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
0.0,
&v->MacroTileWidthC[k],
&v->MetaRowBytesC,
&v->DPTEBytesPerRowC,
&v->PTEBufferSizeNotExceededC[i][j][k],
&v->dummyinteger7,
&v->dpte_row_height_chroma[k],
&v->dummyinteger28,
&v->dummyinteger26,
&v->dummyinteger23,
&v->meta_row_height_chroma[k],
&v->dummyinteger8,
&v->dummyinteger9,
&v->dummyinteger19,
&v->dummyinteger20,
&v->dummyinteger17,
&v->dummyinteger10,
&v->dummyinteger11);
v->PrefetchLinesC[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightCThisState[k],
v->ViewportYStartC[k],
&v->PrefillC[k],
&v->MaxNumSwC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
v->PDEAndMetaPTEBytesPerFrameC = 0.0;
v->MetaRowBytesC = 0.0;
v->DPTEBytesPerRowC = 0.0;
v->PrefetchLinesC[i][j][k] = 0.0;
v->PTEBufferSizeNotExceededC[i][j][k] = true;
}
v->PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightY[k],
v->Read256BlockWidthY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthYThisState[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&v->MetaRowBytesY,
&v->DPTEBytesPerRowY,
&v->PTEBufferSizeNotExceededY[i][j][k],
&v->dummyinteger7,
&v->dpte_row_height[k],
&v->dummyinteger29,
&v->dummyinteger27,
&v->dummyinteger24,
&v->meta_row_height[k],
&v->dummyinteger25,
&v->dpte_group_bytes[k],
&v->dummyinteger21,
&v->dummyinteger22,
&v->dummyinteger18,
&v->dummyinteger5,
&v->dummyinteger6);
v->PrefetchLinesY[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightYThisState[k],
v->ViewportYStartY[k],
&v->PrefillY[k],
&v->MaxNumSwY[k]);
v->PDEAndMetaPTEBytesPerFrame[i][j][k] = v->PDEAndMetaPTEBytesPerFrameY + v->PDEAndMetaPTEBytesPerFrameC;
v->MetaRowBytes[i][j][k] = v->MetaRowBytesY + v->MetaRowBytesC;
v->DPTEBytesPerRow[i][j][k] = v->DPTEBytesPerRowY + v->DPTEBytesPerRowC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
v->MetaRowBytesY,
v->MetaRowBytesC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
v->DPTEBytesPerRowY,
v->DPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bandwidth[i][j][k],
&v->dpte_row_bandwidth[i][j][k]);
}
/*DCCMetaBufferSizeSupport(i, j) = True
For k = 0 To NumberOfActivePlanes - 1
If MetaRowBytes(i, j, k) > 24064 Then
DCCMetaBufferSizeSupport(i, j) = False
End If
Next k*/
v->DCCMetaBufferSizeSupport[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->MetaRowBytes[i][j][k] > 24064)
v->DCCMetaBufferSizeSupport[i][j] = false;
}
v->UrgLatency[i] = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClockPerState[i]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursor[k],
&v->UrgentBurstFactorLuma[k],
&v->UrgentBurstFactorChroma[k],
&NotUrgentLatencyHiding[k]);
}
v->NotEnoughUrgentLatencyHidingA[i][j] = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (NotUrgentLatencyHiding[k]) {
v->NotEnoughUrgentLatencyHidingA[i][j] = true;
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->VActivePixelBandwidth[i][j][k] = v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k]
+ v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k];
v->VActiveCursorBandwidth[i][j][k] = v->cursor_bw[k] * v->UrgentBurstFactorCursor[k];
}
v->TotalVActivePixelBandwidth[i][j] = 0;
v->TotalVActiveCursorBandwidth[i][j] = 0;
v->TotalMetaRowBandwidth[i][j] = 0;
v->TotalDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalVActivePixelBandwidth[i][j] = v->TotalVActivePixelBandwidth[i][j] + v->VActivePixelBandwidth[i][j][k];
v->TotalVActiveCursorBandwidth[i][j] = v->TotalVActiveCursorBandwidth[i][j] + v->VActiveCursorBandwidth[i][j][k];
v->TotalMetaRowBandwidth[i][j] = v->TotalMetaRowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->meta_row_bandwidth[i][j][k];
v->TotalDPTERowBandwidth[i][j] = v->TotalDPTERowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->dpte_row_bandwidth[i][j][k];
}
}
}
//Calculate Writeback Delay, MaximumVStartup and Return BW
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelayTime[k] = v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->RequiredDISPCLK[i][j];
} else {
v->WritebackDelayTime[k] = 0.0;
}
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[m] == k && v->WritebackEnable[m] == true) {
v->WritebackDelayTime[k] = dml_max(
v->WritebackDelayTime[k],
v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[m],
v->WritebackHRatio[m],
v->WritebackVRatio[m],
v->WritebackVTaps[m],
v->WritebackDestinationWidth[m],
v->WritebackDestinationHeight[m],
v->WritebackSourceHeight[m],
v->HTotal[m]) / v->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[k] == m) {
v->WritebackDelayTime[k] = v->WritebackDelayTime[m];
}
}
}
v->MaxMaxVStartup[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->MaximumVStartup[i][j][k] =
(v->Interlace[k] && !v->ProgressiveToInterlaceUnitInOPP) ?
dml_floor((v->VTotal[k] - v->VActive[k]) / 2.0, 1.0) :
v->VTotal[k] - v->VActive[k]
- dml_max(
1.0,
dml_ceil(
1.0 * v->WritebackDelayTime[k]
/ (v->HTotal[k]
/ v->PixelClock[k]),
1.0));
if (v->MaximumVStartup[i][j][k] > 1023)
v->MaximumVStartup[i][j][k] = 1023;
v->MaxMaxVStartup[i][j] = dml_max(v->MaxMaxVStartup[i][j], v->MaximumVStartup[i][j][k]);
}
}
}
ReorderingBytes = v->NumberOfChannels
* dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
}
if (v->UseMinimumRequiredDCFCLK == true)
UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes);
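/* Return bandwidth model: the ideal SDP/fabric bandwidth is
 * ReturnBusWidth * DCFCLK capped by FabricClock * FabricDatapathToDCNDataReturn,
 * and the ideal DRAM bandwidth is DRAMSpeed * NumberOfChannels * DRAMChannelWidth.
 * Both are derated by the urgent-latency efficiency percentages; the
 * VM-mixed derating is used when HostVM is enabled.
 */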
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double IdealFabricAndSDPPortBandwidthPerState = dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn);
double IdealDRAMBandwidthPerState = v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth;
double PixelDataOnlyReturnBWPerState = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly / 100.0);
double PixelMixedWithVMDataReturnBWPerState = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0);
if (v->HostVMEnable != true) {
v->ReturnBWPerState[i][j] = PixelDataOnlyReturnBWPerState;
} else {
v->ReturnBWPerState[i][j] = PixelMixedWithVMDataReturnBWPerState;
}
}
}
//Re-ordering Buffer Support Check
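/* The ROB (minus one pixel chunk) must hold enough data at the state's return
 * bandwidth to cover the round-trip ping latency (plus the arbiter-to-return
 * delay, in DCFCLK cycles) and the out-of-order return bytes.
 */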
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
v->ROBSupport[i][j] = true;
} else {
v->ROBSupport[i][j] = false;
}
}
}
//Vertical Active BW support check
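/* The summed luma and chroma read bandwidth of all planes must fit within the
 * smaller of the derated SDP/fabric and DRAM bandwidth sustainable during
 * normal (vertical active) operation.
 */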
MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn)
* v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth
* v->MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100);
if (MaxTotalVActiveRDBandwidth <= v->MaxTotalVerticalActiveAvailableBandwidth[i][j]) {
v->TotalVerticalActiveBandwidthSupport[i][j] = true;
} else {
v->TotalVerticalActiveBandwidthSupport[i][j] = false;
}
}
}
v->UrgentLatency = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClock);
//Prefetch Check
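/* For each state and MPC combine option, iterate over prefetch modes and
 * vstartup values: build the prefetch schedule, the with-prefetch bandwidth
 * and the immediate-flip bandwidth, and stop once prefetch, dynamic metadata,
 * prefetch VRatio and (when required) immediate flip are all supported, or
 * once the options are exhausted.
 */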
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double VMDataOnlyReturnBWPerState;
double HostVMInefficiencyFactor = 1;
int NextPrefetchModeState = MinPrefetchMode;
bool UnboundedRequestEnabledThisState = false;
int CompressedBufferSizeInkByteThisState = 0;
double dummy;
v->TimeCalc = 24 / v->ProjectedDCFCLKDeepSleep[i][j];
v->BandwidthWithoutPrefetchSupported[i][j] = true;
if (v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j]
+ v->TotalDPTERowBandwidth[i][j] > v->ReturnBWPerState[i][j] || v->NotEnoughUrgentLatencyHidingA[i][j]) {
v->BandwidthWithoutPrefetchSupported[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
}
VMDataOnlyReturnBWPerState = dml_min(
dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn)
* v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth
* v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly / 100.0);
if (v->GPUVMEnable && v->HostVMEnable)
HostVMInefficiencyFactor = v->ReturnBWPerState[i][j] / VMDataOnlyReturnBWPerState;
v->ExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderingBytes,
v->DCFCLKState[i][j],
v->TotalNumberOfActiveDPP[i][j],
v->PixelChunkSizeInKByte,
v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize,
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->NoOfDPPThisState,
v->dpte_group_bytes,
HostVMInefficiencyFactor,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
do {
v->PrefetchModePerState[i][j] = NextPrefetchModeState;
v->MaxVStartup = v->NextMaxVStartup;
v->TWait = CalculateTWait(
v->PrefetchModePerState[i][j],
v->DRAMClockChangeLatency,
v->UrgLatency[i],
v->SREnterPlusExitTime);
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculatePrefetchSchedulePerPlane(mode_lib,
HostVMInefficiencyFactor,
i, j, k);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPreY[i][j][k],
v->VRatioPreC[i][j][k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
&v->UrgentBurstFactorChromaPre[k],
&v->NotUrgentLatencyHidingPre[k]);
}
v->MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatioPreY[i][j][k];
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
+ dml_max3(
v->VActivePixelBandwidth[i][j][k]
+ v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->NotEnoughUrgentLatencyHidingPre = false;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->NotUrgentLatencyHidingPre[k] == true) {
v->NotEnoughUrgentLatencyHidingPre = true;
}
}
v->PrefetchSupported[i][j] = true;
if (v->BandwidthWithoutPrefetchSupported[i][j] == false || v->MaximumReadBandwidthWithPrefetch > v->ReturnBWPerState[i][j]
|| v->NotEnoughUrgentLatencyHidingPre == 1) {
v->PrefetchSupported[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->LineTimesForPrefetch[k] < 2.0 || v->LinesForMetaPTE[k] >= 32.0 || v->LinesForMetaAndDPTERow[k] >= 16.0
|| v->NoTimeForPrefetch[i][j][k] == true) {
v->PrefetchSupported[i][j] = false;
}
}
v->DynamicMetadataSupported[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->NoTimeForDynamicMetadata[i][j][k] == true) {
v->DynamicMetadataSupported[i][j] = false;
}
}
v->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->VRatioPreY[i][j][k] > 4.0 || v->VRatioPreC[i][j][k] > 4.0 || v->NoTimeForPrefetch[i][j][k] == true) {
v->VRatioInPrefetchSupported[i][j] = false;
}
}
v->AnyLinesForVMOrRowTooLarge = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->LinesForMetaAndDPTERow[k] >= 16 || v->LinesForMetaPTE[k] >= 32) {
v->AnyLinesForVMOrRowTooLarge = true;
}
}
v->NextPrefetchMode = v->NextPrefetchMode + 1;
if (v->PrefetchSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBWPerState[i][j];
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
- dml_max(
v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
+ v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ dml_max3(
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k] * v->final_flip_bw[k] + v->VActivePixelBandwidth[i][j][k]
+ v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->final_flip_bw[k]
+ v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->ImmediateFlipSupportedForState[i][j] = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBWPerState[i][j]) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
v->ImmediateFlipSupportedForState[i][j] = false;
}
if (v->MaxVStartup <= __DML_VBA_MIN_VSTARTUP__ || v->AnyLinesForVMOrRowTooLarge == false) {
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
NextPrefetchModeState = NextPrefetchModeState + 1;
} else {
v->NextMaxVStartup = v->NextMaxVStartup - 1;
}
v->NextPrefetchMode = v->NextPrefetchMode + 1;
} while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
&& ((v->HostVMEnable == false &&
v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true))
|| (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode)));
CalculateUnboundedRequestAndCompressedBufferSize(
v->DETBufferSizeInKByte[0],
v->ConfigReturnBufferSizeInKByte,
v->UseUnboundedRequesting,
v->TotalNumberOfActiveDPP[i][j],
NoChroma,
v->MaxNumDPP,
v->CompressedBufferSegmentSizeInkByte,
v->Output,
&UnboundedRequestEnabledThisState,
&CompressedBufferSizeInkByteThisState);
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->UrgLatency[i],
v->ExtraLatency,
v->SOCCLKPerState[i],
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
&dummy,
&dummy,
&dummy,
&dummy);
}
}
/*PTE Buffer Size Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->PTEBufferSizeNotExceededY[i][j][k] == false || v->PTEBufferSizeNotExceededC[i][j][k] == false) {
v->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
v->CursorSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->CursorWidth[k][0] > 0.0) {
if (v->CursorBPP[k][0] == 64 && v->Cursor64BppSupport == false) {
v->CursorSupport = false;
}
}
}
/*Valid Pitch Check*/
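/* Pitches must already be aligned: the luma/chroma pitch to the macro tile
 * width and the DCC meta pitch to 64x the 256-byte block width. If aligning
 * would grow any pitch, the configuration is rejected.
 */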
v->PitchSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->AlignedYPitch[k] = dml_ceil(dml_max(v->PitchY[k], v->SurfaceWidthY[k]), v->MacroTileWidthY[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchY[k] = dml_ceil(dml_max(v->DCCMetaPitchY[k], v->SurfaceWidthY[k]), 64.0 * v->Read256BlockWidthY[k]);
} else {
v->AlignedDCCMetaPitchY[k] = v->DCCMetaPitchY[k];
}
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
&& v->SourcePixelFormat[k] != dm_mono_16 && v->SourcePixelFormat[k] != dm_rgbe
&& v->SourcePixelFormat[k] != dm_mono_8) {
v->AlignedCPitch[k] = dml_ceil(dml_max(v->PitchC[k], v->SurfaceWidthC[k]), v->MacroTileWidthC[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchC[k] = dml_ceil(
dml_max(v->DCCMetaPitchC[k], v->SurfaceWidthC[k]),
64.0 * v->Read256BlockWidthC[k]);
} else {
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
} else {
v->AlignedCPitch[k] = v->PitchC[k];
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
if (v->AlignedYPitch[k] > v->PitchY[k] || v->AlignedCPitch[k] > v->PitchC[k]
|| v->AlignedDCCMetaPitchY[k] > v->DCCMetaPitchY[k] || v->AlignedDCCMetaPitchC[k] > v->DCCMetaPitchC[k]) {
v->PitchSupport = false;
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) {
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8
&& v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k]
|| v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
}
}
}
/*Mode Support, Voltage State and SOC Configuration*/
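/* A (state, MPC combine) pair is supported only if every individual check
 * above passed; the debug prints below name the checks that rejected it.
 */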
for (i = v->soc.num_states - 1; i >= 0; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == true && v->SourceFormatPixelAndScanSupport == true && v->ViewportSizeSupport[i][j] == true
&& v->LinkCapacitySupport[i] == true && !P2IWith420 && !DSCOnlyIfNecessaryWithBPP
&& !DSC422NativeNotSupported && v->ODMCombine4To1SupportCheckOK[i] == true && v->NotEnoughDSCUnits[i] == false
&& v->DTBCLKRequiredMoreThanSupported[i] == false
&& v->ROBSupport[i][j] == true && v->DISPCLK_DPPCLK_Support[i][j] == true
&& v->TotalAvailablePipesSupport[i][j] == true && EnoughWritebackUnits == true
&& v->WritebackLatencySupport == true && v->WritebackScaleRatioAndTapsSupport == true
&& v->CursorSupport == true && v->PitchSupport == true && ViewportExceedsSurface == false
&& v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true
&& v->TotalVerticalActiveBandwidthSupport[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
&& v->PTEBufferSizeNotExceeded[i][j] == true && v->NonsupportedDSCInputBPC == false
&& ((v->HostVMEnable == false
&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true)
&& FMTBufferExceeded == false) {
v->ModeSupport[i][j] = true;
} else {
v->ModeSupport[i][j] = false;
#ifdef __DML_VBA_DEBUG__
if (v->ScaleRatioAndTapsSupport == false)
dml_print("DML SUPPORT: ScaleRatioAndTapsSupport failed");
if (v->SourceFormatPixelAndScanSupport == false)
dml_print("DML SUPPORT: SourceFormatPixelAndScanSupport failed");
if (v->ViewportSizeSupport[i][j] == false)
dml_print("DML SUPPORT: ViewportSizeSupport failed");
if (v->LinkCapacitySupport[i] == false)
dml_print("DML SUPPORT: LinkCapacitySupport failed");
if (v->ODMCombine4To1SupportCheckOK[i] == false)
dml_print("DML SUPPORT: ODMCombine4To1SupportCheckOK failed");
if (v->NotEnoughDSCUnits[i] == true)
dml_print("DML SUPPORT: NotEnoughDSCUnits");
if (v->DTBCLKRequiredMoreThanSupported[i] == true)
dml_print("DML SUPPORT: DTBCLKRequiredMoreThanSupported");
if (v->ROBSupport[i][j] == false)
dml_print("DML SUPPORT: ROBSupport failed");
if (v->DISPCLK_DPPCLK_Support[i][j] == false)
dml_print("DML SUPPORT: DISPCLK_DPPCLK_Support failed");
if (v->TotalAvailablePipesSupport[i][j] == false)
dml_print("DML SUPPORT: TotalAvailablePipesSupport failed");
if (EnoughWritebackUnits == false)
dml_print("DML SUPPORT: EnoughWritebackUnits failed");
if (v->WritebackLatencySupport == false)
dml_print("DML SUPPORT: WritebackLatencySupport failed");
if (v->WritebackScaleRatioAndTapsSupport == false)
dml_print("DML SUPPORT: WritebackScaleRatioAndTapsSupport failed");
if (v->CursorSupport == false)
dml_print("DML SUPPORT: CursorSupport failed");
if (v->PitchSupport == false)
dml_print("DML SUPPORT: PitchSupport failed");
if (ViewportExceedsSurface == true)
dml_print("DML SUPPORT: ViewportExceedsSurface failed");
if (v->PrefetchSupported[i][j] == false)
dml_print("DML SUPPORT: PrefetchSupported failed");
if (v->DynamicMetadataSupported[i][j] == false)
dml_print("DML SUPPORT: DynamicMetadataSupported failed");
if (v->TotalVerticalActiveBandwidthSupport[i][j] == false)
dml_print("DML SUPPORT: TotalVerticalActiveBandwidthSupport failed");
if (v->VRatioInPrefetchSupported[i][j] == false)
dml_print("DML SUPPORT: VRatioInPrefetchSupported failed");
if (v->PTEBufferSizeNotExceeded[i][j] == false)
dml_print("DML SUPPORT: PTEBufferSizeNotExceeded failed");
if (v->NonsupportedDSCInputBPC == true)
dml_print("DML SUPPORT: NonsupportedDSCInputBPC failed");
if (!((v->HostVMEnable == false
&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true))
dml_print("DML SUPPORT: ImmediateFlipRequirement failed");
if (FMTBufferExceeded == true)
dml_print("DML SUPPORT: FMTBufferExceeded failed");
#endif
}
}
}
{
unsigned int MaximumMPCCombine = 0;
for (i = v->soc.num_states; i >= 0; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
if (v->ModeSupport[i][0] == true) {
MaximumMPCCombine = 0;
} else {
MaximumMPCCombine = 1;
}
}
}
v->ImmediateFlipSupport = v->ImmediateFlipSupportedForState[v->VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->MPCCombineEnable[k] = v->MPCCombine[v->VoltageLevel][MaximumMPCCombine][k];
v->DPPPerPlane[k] = v->NoOfDPP[v->VoltageLevel][MaximumMPCCombine][k];
}
v->DCFCLK = v->DCFCLKState[v->VoltageLevel][MaximumMPCCombine];
v->DRAMSpeed = v->DRAMSpeedPerState[v->VoltageLevel];
v->FabricClock = v->FabricClockPerState[v->VoltageLevel];
v->SOCCLK = v->SOCCLKPerState[v->VoltageLevel];
v->ReturnBW = v->ReturnBWPerState[v->VoltageLevel][MaximumMPCCombine];
v->maxMpcComb = MaximumMPCCombine;
}
}
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
double DCFCLK,
double ReturnBW,
double UrgentLatency,
double ExtraLatency,
double SOCCLK,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETC;
unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
unsigned int LinesInDETCRoundedDownToSwath;
double FullDETBufferingTimeY;
double FullDETBufferingTimeC;
double ActiveDRAMClockChangeLatencyMarginY;
double ActiveDRAMClockChangeLatencyMarginC;
double WritebackDRAMClockChangeLatencyMargin;
double PlaneWithMinActiveDRAMClockChangeMargin;
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
double WritebackDRAMClockChangeLatencyHiding;
double TotalPixelBW = 0.0;
int k, j;
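/* UrgentWatermark = UrgentLatency + ExtraLatency, and the DRAM clock change
 * watermark adds the DRAM clock change latency on top. Per-plane latency
 * hiding comes from line-buffer plus DET buffering (less the post-scaler
 * delay); the worst-case margin decides whether DRAM clock changes can be
 * hidden in vertical active, in vblank, or not at all.
 */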
v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
/ (v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (v->WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
/ (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
v->TotalNumberOfActiveOTG = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
*StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, *StutterEnterPlusExitWatermark);
dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, *Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, *Z8StutterEnterPlusExitWatermark);
#endif
}
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep)
{
struct vba_vars_st *v = &mode_lib->vba;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
double ReadBandwidth = 0.0;
int k;
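/* Deep-sleep DCFCLK must keep up with each plane's swath delivery (luma and
 * chroma) and with the total read bandwidth over the return bus, with a
 * per-plane floor of PixelClock / 16 and an overall floor of 8.0.
 */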
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (BytePerPixelC[k] > 0) {
v->DCFCLKDeepSleepPerPlane[k] = dml_max(__DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma,
__DML_MIN_DCFCLK_FACTOR__ * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
} else {
v->DCFCLKDeepSleepPerPlane[k] = __DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
}
v->DCFCLKDeepSleepPerPlane[k] = dml_max(v->DCFCLKDeepSleepPerPlane[k], PixelClock[k] / 16);
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
*DCFCLKDeepSleep = dml_max(8.0, __DML_MIN_DCFCLK_FACTOR__ * ReadBandwidth / ReturnBusWidth);
for (k = 0; k < NumberOfActivePlanes; ++k) {
*DCFCLKDeepSleep = dml_max(*DCFCLKDeepSleep, v->DCFCLKDeepSleepPerPlane[k]);
}
}
static void CalculateUrgentBurstFactor(
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
double CursorBufferSizeInTime;
double DETBufferSizeInTimeLuma;
double DETBufferSizeInTimeChroma;
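/* The burst factor is buffer_time / (buffer_time - UrgentLatency) for the
 * cursor buffer, luma DET and chroma DET; if any of these buffers drains in
 * less than the urgent latency, NotEnoughUrgentLatencyHiding is flagged.
 */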
*NotEnoughUrgentLatencyHiding = 0;
if (CursorWidth > 0) {
LinesInCursorBuffer = 1 << (unsigned int) dml_floor(dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
if (VRatio > 0) {
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorCursor = 0;
} else {
*UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
}
} else {
*UrgentBurstFactorCursor = 1;
}
}
LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / swath_width_luma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorLuma = 0;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
} else {
*UrgentBurstFactorLuma = 1;
}
if (BytePerPixelInDETC > 0) {
LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / swath_width_chroma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / VRatio;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorChroma = 0;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
}
} else {
*UrgentBurstFactorChroma = 1;
}
}
}
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][DC__NUM_CURSOR__MAX],
unsigned int CursorBPP[][DC__NUM_CURSOR__MAX],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[])
{
double req_per_swath_ub;
int k;
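/* Line delivery time is limited by the display-side rate (HRatio and
 * PixelClock) when VRatio <= 1, and by the scaler throughput (PSCL_THROUGHPUT
 * and DPPCLK) otherwise; request delivery time then divides the line time by
 * the number of 256-byte requests per swath line.
 */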
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma[k] = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (VRatioPrefetchY[k] <= 1) {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (VRatioPrefetchC[k] <= 1) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
} else {
req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
}
DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
if (BytePerPixelC[k] == 0) {
DisplayPipeRequestDeliveryTimeChroma[k] = 0;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockWidth256BytesC[k];
} else {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockHeight256BytesC[k];
}
DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : HRatio = %f\n", __func__, k, HRatio[k]);
dml_print("DML::%s: k=%d : VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%d : HRatioChroma = %f\n", __func__, k, HRatioChroma[k]);
dml_print("DML::%s: k=%d : VRatioChroma = %f\n", __func__, k, VRatioChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
#endif
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
int cursor_req_per_width;
cursor_req_per_width = dml_ceil(CursorWidth[k][0] * CursorBPP[k][0] / 256 / 8, 1);
if (NumberOfCursors[k] > 0) {
if (VRatio[k] <= 1) {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
if (VRatioPrefetchY[k] <= 1) {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
} else {
CursorRequestDeliveryTime[k] = 0;
CursorRequestDeliveryTimePrefetch[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : NumberOfCursors = %d\n", __func__, k, NumberOfCursors[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTime = %f\n", __func__, k, CursorRequestDeliveryTime[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTimePrefetch = %f\n", __func__, k, CursorRequestDeliveryTimePrefetch[k]);
#endif
}
}
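/*
 * Derive per-plane delivery-time budgets for DCC meta chunks and PTE request
 * groups: nominal (per displayed row), during vblank, and during immediate
 * flip, based on row geometry, scan direction and the destination line counts.
 */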
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[])
{
unsigned int meta_chunk_width;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_chunks_per_row_ub;
unsigned int meta_chunk_width_chroma;
unsigned int min_meta_chunk_width_chroma;
unsigned int meta_chunk_per_row_int_chroma;
unsigned int meta_row_remainder_chroma;
unsigned int meta_chunk_threshold_chroma;
unsigned int meta_chunks_per_row_ub_chroma;
unsigned int dpte_group_width_luma;
unsigned int dpte_groups_per_row_luma_ub;
unsigned int dpte_group_width_chroma;
unsigned int dpte_groups_per_row_chroma_ub;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / VRatioChroma[k];
}
DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_META_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_META_ROW_NOM_C[k] = meta_row_height_chroma[k] / VRatioChroma[k];
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
meta_chunk_width = MetaChunkSize * 1024 * 256 / BytePerPixelY[k] / meta_row_height[k];
min_meta_chunk_width = MinMetaChunkSizeBytes * 256 / BytePerPixelY[k] / meta_row_height[k];
meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
} else {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height[k];
}
if (meta_row_remainder <= meta_chunk_threshold) {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
} else {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
}
TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
if (BytePerPixelC[k] == 0) {
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
} else {
meta_chunk_width_chroma = MetaChunkSize * 1024 * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
min_meta_chunk_width_chroma = MinMetaChunkSizeBytes * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
meta_chunk_per_row_int_chroma = (double) meta_row_width_chroma[k] / meta_chunk_width_chroma;
meta_row_remainder_chroma = meta_row_width_chroma[k] % meta_chunk_width_chroma;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_width_chroma[k];
} else {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_height_chroma[k];
}
if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
} else {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
}
TimePerChromaMetaChunkNominal[k] = meta_row_height_chroma[k] / VRatioChroma[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
}
} else {
TimePerMetaChunkNominal[k] = 0;
TimePerMetaChunkVBlank[k] = 0;
TimePerMetaChunkFlip[k] = 0;
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
if (SourceScan[k] != dm_vert) {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqWidthY[k];
} else {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(1.0 * dpte_row_width_luma_ub[k] / dpte_group_width_luma, 1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_flip_luma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
if (BytePerPixelC[k] == 0) {
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqWidthC[k];
} else {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(1.0 * dpte_row_width_chroma_ub[k] / dpte_group_width_chroma, 1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_vblank_chroma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_flip_chroma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
}
} else {
time_per_pte_group_nom_luma[k] = 0;
time_per_pte_group_vblank_luma[k] = 0;
time_per_pte_group_flip_luma[k] = 0;
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
}
}
}
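/*
 * Compute per-plane time budgets for GPUVM page-table fetches during vblank
 * and immediate flip: time per VM group and time per 64-byte VM request,
 * halved when more than two page-table levels are in use.
 */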
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
int num_group_per_lower_vm_stage;
int num_req_per_lower_vm_stage;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = 2 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = 1 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
}
}
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64
+ meta_pte_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_l[k] / 64;
}
}
}
TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
if (GPUVMMaxPageTableLevels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
}
} else {
TimePerVMGroupVBlank[k] = 0;
TimePerVMGroupFlip[k] = 0;
TimePerVMRequestVBlank[k] = 0;
TimePerVMRequestFlip[k] = 0;
}
}
}
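/*
 * Estimate stutter and Z8 stutter efficiency: track the critical plane whose
 * DET drains fastest to get the stutter period, size the read burst that fits
 * in the ROB plus compressed buffer, and fold in SR exit times to produce the
 * efficiency percentages and burst counts per frame.
 */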
static void CalculateStutterEfficiency(
struct display_mode_lib *mode_lib,
int CompressedBufferSizeInkByte,
bool UnboundedRequestEnabled,
int ConfigReturnBufferSizeInKByte,
int MetaFIFOSizeInKEntries,
int ZeroSizeBufferEntries,
int NumberOfActivePlanes,
int ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double COMPBUF_RESERVED_SPACE_64B,
double COMPBUF_RESERVED_SPACE_ZS,
double SRExitTime,
double SRExitZ8Time,
bool SynchronizedVBlank,
double Z8StutterEnterPlusExitWatermark,
double StutterEnterPlusExitWatermark,
bool ProgressiveToInterlaceUnitInOPP,
bool Interlace[],
double MinTTUVBlank[],
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double NetDCCRateLuma[],
double NetDCCRateChroma[],
double DCCFractionOfZeroSizeRequestsLuma[],
double DCCFractionOfZeroSizeRequestsChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
int *NumberOfStutterBurstsPerFrame,
double *Z8StutterEfficiencyNotIncludingVBlank,
double *Z8StutterEfficiency,
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod)
{
struct vba_vars_st *v = &mode_lib->vba;
double DETBufferingTimeY;
double SwathWidthYCriticalPlane = 0;
double VActiveTimeCriticalPlane = 0;
double FrameTimeCriticalPlane = 0;
int BytePerPixelYCriticalPlane = 0;
double LinesToFinishSwathTransferStutterCriticalPlane = 0;
double MinTTUVBlankCriticalPlane = 0;
double TotalCompressedReadBandwidth;
double TotalRowReadBandwidth;
double AverageDCCCompressionRate;
double EffectiveCompressedBufferSize;
double PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer;
double StutterBurstTime;
int TotalActiveWriteback;
double LinesInDETY;
double LinesInDETYRoundedDownToSwath;
double MaximumEffectiveCompressionLuma;
double MaximumEffectiveCompressionChroma;
double TotalZeroSizeRequestReadBandwidth;
double TotalZeroSizeCompressedReadBandwidth;
double AverageDCCZeroSizeFraction;
double AverageZeroSizeCompressionRate;
int TotalNumberOfActiveOTG = 0;
double LastStutterPeriod = 0.0;
double LastZ8StutterPeriod = 0.0;
int k;
TotalZeroSizeRequestReadBandwidth = 0;
TotalZeroSizeCompressedReadBandwidth = 0;
TotalRowReadBandwidth = 0;
TotalCompressedReadBandwidth = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesY[k] > SwathHeightY[k]) || (SourceScan[k] != dm_vert && BlockHeight256BytesY[k] > SwathHeightY[k])
|| DCCYMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionLuma = 2;
} else {
MaximumEffectiveCompressionLuma = 4;
}
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth + ReadBandwidthPlaneLuma[k] / dml_min(NetDCCRateLuma[k], MaximumEffectiveCompressionLuma);
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth + ReadBandwidthPlaneLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthPlaneLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k] / MaximumEffectiveCompressionLuma;
if (ReadBandwidthPlaneChroma[k] > 0) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesC[k] > SwathHeightC[k])
|| (SourceScan[k] != dm_vert && BlockHeight256BytesC[k] > SwathHeightC[k]) || DCCCMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionChroma = 2;
} else {
MaximumEffectiveCompressionChroma = 4;
}
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth
+ ReadBandwidthPlaneChroma[k] / dml_min(NetDCCRateChroma[k], MaximumEffectiveCompressionChroma);
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth + ReadBandwidthPlaneChroma[k] * DCCFractionOfZeroSizeRequestsChroma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthPlaneChroma[k] * DCCFractionOfZeroSizeRequestsChroma[k] / MaximumEffectiveCompressionChroma;
}
} else {
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth + ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
}
TotalRowReadBandwidth = TotalRowReadBandwidth + DPPPerPlane[k] * (meta_row_bw[k] + dpte_row_bw[k]);
}
AverageDCCCompressionRate = TotalDataReadBandwidth / TotalCompressedReadBandwidth;
AverageDCCZeroSizeFraction = TotalZeroSizeRequestReadBandwidth / TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, TotalCompressedReadBandwidth);
dml_print("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, TotalZeroSizeRequestReadBandwidth);
dml_print("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, TotalZeroSizeCompressedReadBandwidth);
dml_print("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, MaximumEffectiveCompressionLuma);
dml_print("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, MaximumEffectiveCompressionChroma);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, AverageDCCZeroSizeFraction);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, CompressedBufferSizeInkByte);
#endif
if (AverageDCCZeroSizeFraction == 1) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth / TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = MetaFIFOSizeInKEntries * 1024 * 64 * AverageZeroSizeCompressionRate + (ZeroSizeBufferEntries - COMPBUF_RESERVED_SPACE_ZS) * 64 * AverageZeroSizeCompressionRate;
} else if (AverageDCCZeroSizeFraction > 0) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth / TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = dml_min(
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
MetaFIFOSizeInKEntries * 1024 * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate + 1 / AverageDCCCompressionRate))
+ dml_min((ROBBufferSizeInKByte * 1024 - COMPBUF_RESERVED_SPACE_64B * 64) * AverageDCCCompressionRate,
(ZeroSizeBufferEntries - COMPBUF_RESERVED_SPACE_ZS) * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
dml_print("DML::%s: min 1 = %f\n", __func__, CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print(
"DML::%s: min 2 = %f\n",
__func__,
MetaFIFOSizeInKEntries * 1024 * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate + 1 / AverageDCCCompressionRate));
dml_print("DML::%s: min 3 = %f\n", __func__, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 4 = %f\n", __func__, ZeroSizeBufferEntries * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
} else {
EffectiveCompressedBufferSize = dml_min(
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate) + (ROBBufferSizeInKByte * 1024 - COMPBUF_RESERVED_SPACE_64B * 64) * AverageDCCCompressionRate;
dml_print("DML::%s: min 1 = %f\n", __func__, CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 2 = %f\n", __func__, MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaFIFOSizeInKEntries = %d\n", __func__, MetaFIFOSizeInKEntries);
dml_print("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, AverageZeroSizeCompressionRate);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
#endif
*StutterPeriod = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
LinesInDETY = (DETBufferSizeY[k] + (UnboundedRequestEnabled == true ? EffectiveCompressedBufferSize : 0) * ReadBandwidthPlaneLuma[k] / TotalDataReadBandwidth)
/ BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath = dml_floor(LinesInDETY, SwathHeightY[k]);
DETBufferingTimeY = LinesInDETYRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatio[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeY = %f\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%0d SwathWidthY = %f\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneLuma = %f\n", __func__, k, ReadBandwidthPlaneLuma[k]);
dml_print("DML::%s: k=%0d TotalDataReadBandwidth = %f\n", __func__, k, TotalDataReadBandwidth);
dml_print("DML::%s: k=%0d LinesInDETY = %f\n", __func__, k, LinesInDETY);
dml_print("DML::%s: k=%0d LinesInDETYRoundedDownToSwath = %f\n", __func__, k, LinesInDETYRoundedDownToSwath);
dml_print("DML::%s: k=%0d HTotal = %d\n", __func__, k, HTotal[k]);
dml_print("DML::%s: k=%0d PixelClock = %f\n", __func__, k, PixelClock[k]);
dml_print("DML::%s: k=%0d VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%0d DETBufferingTimeY = %f\n", __func__, k, DETBufferingTimeY);
dml_print("DML::%s: k=%0d PixelClock = %f\n", __func__, k, PixelClock[k]);
#endif
if (k == 0 || DETBufferingTimeY < *StutterPeriod) {
bool isInterlaceTiming = Interlace[k] && !ProgressiveToInterlaceUnitInOPP;
*StutterPeriod = DETBufferingTimeY;
FrameTimeCriticalPlane = (isInterlaceTiming ? dml_floor(VTotal[k] / 2.0, 1.0) : VTotal[k]) * HTotal[k] / PixelClock[k];
VActiveTimeCriticalPlane = (isInterlaceTiming ? dml_floor(VActive[k] / 2.0, 1.0) : VActive[k]) * HTotal[k] / PixelClock[k];
BytePerPixelYCriticalPlane = BytePerPixelY[k];
SwathWidthYCriticalPlane = SwathWidthY[k];
LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[k] - (LinesInDETY - LinesInDETYRoundedDownToSwath);
MinTTUVBlankCriticalPlane = MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
dml_print("DML::%s: MinTTUVBlankCriticalPlane = %f\n", __func__, MinTTUVBlankCriticalPlane);
dml_print("DML::%s: FrameTimeCriticalPlane = %f\n", __func__, FrameTimeCriticalPlane);
dml_print("DML::%s: VActiveTimeCriticalPlane = %f\n", __func__, VActiveTimeCriticalPlane);
dml_print("DML::%s: BytePerPixelYCriticalPlane = %d\n", __func__, BytePerPixelYCriticalPlane);
dml_print("DML::%s: SwathWidthYCriticalPlane = %f\n", __func__, SwathWidthYCriticalPlane);
dml_print("DML::%s: LinesToFinishSwathTransferStutterCriticalPlane = %f\n", __func__, LinesToFinishSwathTransferStutterCriticalPlane);
#endif
}
}
PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = dml_min(*StutterPeriod * TotalDataReadBandwidth, EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ROBBufferSizeInKByte = %d\n", __func__, ROBBufferSizeInKByte);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n", __func__, *StutterPeriod * TotalDataReadBandwidth);
dml_print("DML::%s: ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate + EffectiveCompressedBufferSize = %f\n", __func__, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate + EffectiveCompressedBufferSize);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
dml_print("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f\n", __func__, PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer);
dml_print("DML::%s: ReturnBW = %f\n", __func__, ReturnBW);
dml_print("DML::%s: TotalDataReadBandwidth = %f\n", __func__, TotalDataReadBandwidth);
dml_print("DML::%s: TotalRowReadBandwidth = %f\n", __func__, TotalRowReadBandwidth);
dml_print("DML::%s: DCFCLK = %f\n", __func__, DCFCLK);
#endif
StutterBurstTime = PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / AverageDCCCompressionRate / ReturnBW
+ (*StutterPeriod * TotalDataReadBandwidth - PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64)
+ *StutterPeriod * TotalRowReadBandwidth / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Part 1 = %f\n", __func__, PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / AverageDCCCompressionRate / ReturnBW);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n", __func__, (*StutterPeriod * TotalDataReadBandwidth));
dml_print("DML::%s: Part 2 = %f\n", __func__, (*StutterPeriod * TotalDataReadBandwidth - PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64));
dml_print("DML::%s: Part 3 = %f\n", __func__, *StutterPeriod * TotalRowReadBandwidth / ReturnBW);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
#endif
StutterBurstTime = dml_max(StutterBurstTime, LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
dml_print(
"DML::%s: Time to finish residue swath=%f\n",
__func__,
LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k]) {
TotalActiveWriteback = TotalActiveWriteback + 1;
}
}
if (TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SRExitTime = %f\n", __func__, SRExitTime);
dml_print("DML::%s: SRExitZ8Time = %f\n", __func__, SRExitZ8Time);
dml_print("DML::%s: StutterBurstTime = %f (final)\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
#endif
*StutterEfficiencyNotIncludingVBlank = dml_max(0., 1 - (SRExitTime + StutterBurstTime) / *StutterPeriod) * 100;
*Z8StutterEfficiencyNotIncludingVBlank = dml_max(0., 1 - (SRExitZ8Time + StutterBurstTime) / *StutterPeriod) * 100;
*NumberOfStutterBurstsPerFrame = (*StutterEfficiencyNotIncludingVBlank > 0 ? dml_ceil(VActiveTimeCriticalPlane / *StutterPeriod, 1) : 0);
*Z8NumberOfStutterBurstsPerFrame = (*Z8StutterEfficiencyNotIncludingVBlank > 0 ? dml_ceil(VActiveTimeCriticalPlane / *StutterPeriod, 1) : 0);
} else {
*StutterEfficiencyNotIncludingVBlank = 0.;
*Z8StutterEfficiencyNotIncludingVBlank = 0.;
*NumberOfStutterBurstsPerFrame = 0;
*Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VActiveTimeCriticalPlane = %f\n", __func__, VActiveTimeCriticalPlane);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *Z8StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: NumberOfStutterBurstsPerFrame = %d\n", __func__, *NumberOfStutterBurstsPerFrame);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
#endif
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
TotalNumberOfActiveOTG = TotalNumberOfActiveOTG + 1;
}
}
if (*StutterEfficiencyNotIncludingVBlank > 0) {
LastStutterPeriod = VActiveTimeCriticalPlane - (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizedVBlank || TotalNumberOfActiveOTG == 1) && LastStutterPeriod + MinTTUVBlankCriticalPlane > StutterEnterPlusExitWatermark) {
*StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitTime + StutterBurstTime * VActiveTimeCriticalPlane
/ *StutterPeriod) / FrameTimeCriticalPlane) * 100;
} else {
*StutterEfficiency = *StutterEfficiencyNotIncludingVBlank;
}
} else {
*StutterEfficiency = 0;
}
if (*Z8StutterEfficiencyNotIncludingVBlank > 0) {
LastZ8StutterPeriod = VActiveTimeCriticalPlane - (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizedVBlank || TotalNumberOfActiveOTG == 1) && LastZ8StutterPeriod + MinTTUVBlankCriticalPlane > Z8StutterEnterPlusExitWatermark) {
*Z8StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitZ8Time + StutterBurstTime * VActiveTimeCriticalPlane
/ *StutterPeriod) / FrameTimeCriticalPlane) * 100;
} else {
*Z8StutterEfficiency = *Z8StutterEfficiencyNotIncludingVBlank;
}
} else {
*Z8StutterEfficiency = 0.;
}
dml_print("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, Z8StutterEnterPlusExitWatermark);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
dml_print("DML::%s: StutterEfficiency = %f\n", __func__, *StutterEfficiency);
dml_print("DML::%s: Z8StutterEfficiency = %f\n", __func__, *Z8StutterEfficiency);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
}
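/*
 * Choose luma/chroma swath heights and split the DET between the planes so
 * the rounded-up swath pair fits within half the DET; flag the viewport as
 * unsupported when even the minimum swaths do not fit or a swath width
 * exceeds its maximum.
 */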
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByteA[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
int MaximumSwathHeightY[DC__NUM_DPP__MAX];
int MaximumSwathHeightC[DC__NUM_DPP__MAX];
int MinimumSwathHeightY;
int MinimumSwathHeightC;
int RoundedUpMaxSwathSizeBytesY;
int RoundedUpMaxSwathSizeBytesC;
int RoundedUpMinSwathSizeBytesY;
int RoundedUpMinSwathSizeBytesC;
int RoundedUpSwathSizeBytesY;
int RoundedUpSwathSizeBytesC;
double SwathWidthSingleDPP[DC__NUM_DPP__MAX];
double SwathWidthSingleDPPChroma[DC__NUM_DPP__MAX];
int k;
CalculateSwathWidth(
ForceSingleDPP,
NumberOfActivePlanes,
SourcePixelFormat,
SourceScan,
ViewportWidth,
ViewportHeight,
SurfaceWidthY,
SurfaceWidthC,
SurfaceHeightY,
SurfaceHeightC,
ODMCombineEnabled,
BytePerPixY,
BytePerPixC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
BlendingAndTiming,
HActive,
HRatio,
DPPPerPlane,
SwathWidthSingleDPP,
SwathWidthSingleDPPChroma,
SwathWidth,
SwathWidthChroma,
MaximumSwathHeightY,
MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
if (DETSharedByAllDPP && DPPPerPlane[k])
DETBufferSizeInKByte /= DPPPerPlane[k];
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear
|| (SourcePixelFormat[k] == dm_444_64
&& (SurfaceTiling[k] == dm_sw_64kb_s || SurfaceTiling[k] == dm_sw_64kb_s_t || SurfaceTiling[k] == dm_sw_64kb_s_x)
&& SourceScan[k] != dm_vert)) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else if (SourcePixelFormat[k] == dm_444_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else {
if (SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else if (SourcePixelFormat[k] == dm_420_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else {
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
}
RoundedUpMaxSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
RoundedUpMinSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k] * MinimumSwathHeightY;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil((double) RoundedUpMaxSwathSizeBytesY, 256);
RoundedUpMinSwathSizeBytesY = dml_ceil((double) RoundedUpMinSwathSizeBytesY, 256);
}
RoundedUpMaxSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
RoundedUpMinSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MinimumSwathHeightC;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(RoundedUpMaxSwathSizeBytesC, 256);
RoundedUpMinSwathSizeBytesC = dml_ceil(RoundedUpMinSwathSizeBytesC, 256);
}
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY >= 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMinSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY < 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMaxSwathSizeBytesY + RoundedUpMinSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
} else {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
}
{
double actDETBufferSizeInKByte = dml_ceil(DETBufferSizeInKByte, 64);
if (SwathHeightC[k] == 0) {
DETBufferSizeY[k] = actDETBufferSizeInKByte * 1024;
DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
DETBufferSizeY[k] = actDETBufferSizeInKByte * 1024 / 2;
DETBufferSizeC[k] = actDETBufferSizeInKByte * 1024 / 2;
} else {
DETBufferSizeY[k] = dml_floor(actDETBufferSizeInKByte * 1024 * 2 / 3, 1024);
DETBufferSizeC[k] = actDETBufferSizeInKByte * 1024 / 3;
}
if (RoundedUpMinSwathSizeBytesY + RoundedUpMinSwathSizeBytesC > actDETBufferSizeInKByte * 1024 / 2 || SwathWidth[k] > MaximumSwathWidthLuma[k]
|| (SwathHeightC[k] > 0 && SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
ViewportSizeSupportPerPlane[k] = false;
} else {
ViewportSizeSupportPerPlane[k] = true;
}
}
}
}
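/*
 * Work out single-DPP and per-DPP swath widths for luma and chroma from the
 * viewport, scan direction and ODM combine mode, then round them up to the
 * surface's 256-byte request block granularity.
 */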
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[])
{
enum odm_combine_mode MainPlaneODMCombine;
int j, k;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: NumberOfActivePlanes = %d\n", __func__, NumberOfActivePlanes);
#endif
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
} else {
SwathWidthSingleDPPY[k] = ViewportHeight[k];
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d ViewportWidth=%d\n", __func__, k, ViewportWidth[k]);
dml_print("DML::%s: k=%d ViewportHeight=%d\n", __func__, k, ViewportHeight[k]);
#endif
MainPlaneODMCombine = ODMCombineEnabled[k];
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
MainPlaneODMCombine = ODMCombineEnabled[j];
}
}
if (MainPlaneODMCombine == dm_odm_combine_mode_4to1) {
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 4.0 * HRatio[k]));
} else if (MainPlaneODMCombine == dm_odm_combine_mode_2to1) {
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 2.0 * HRatio[k]));
} else if (DPPPerPlane[k] == 2) {
SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
} else {
SwathWidthY[k] = SwathWidthSingleDPPY[k];
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d SwathWidthSingleDPPY=%f\n", __func__, k, SwathWidthSingleDPPY[k]);
dml_print("DML::%s: k=%d SwathWidthY=%f\n", __func__, k, SwathWidthY[k]);
#endif
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 || SourcePixelFormat[k] == dm_420_12) {
SwathWidthC[k] = SwathWidthY[k] / 2;
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
} else {
SwathWidthC[k] = SwathWidthY[k];
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
}
if (ForceSingleDPP == true) {
SwathWidthY[k] = SwathWidthSingleDPPY[k];
SwathWidthC[k] = SwathWidthSingleDPPC[k];
}
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
#endif
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
} else {
MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
}
}
}
}
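/*
 * Extra latency is the round-trip ping plus arbitration-to-return delay in
 * DCFCLK cycles, plus the time needed to return the extra latency bytes at
 * the available return bandwidth.
 */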
static double CalculateExtraLatency(
int RoundTripPingLatencyCycles,
int ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ExtraLatencyBytes;
double ExtraLatency;
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
TotalNumberOfActiveDPP,
PixelChunkSizeInKByte,
TotalNumberOfDCCActiveDPP,
MetaChunkSize,
GPUVMEnable,
HostVMEnable,
NumberOfActivePlanes,
NumberOfDPP,
dpte_group_bytes,
HostVMInefficiencyFactor,
HostVMMinPageSize,
HostVMMaxNonCachedPageTableLevels);
ExtraLatency = (RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__) / DCFCLK + ExtraLatencyBytes / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: RoundTripPingLatencyCycles=%d\n", __func__, RoundTripPingLatencyCycles);
dml_print("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml_print("DML::%s: ExtraLatencyBytes=%f\n", __func__, ExtraLatencyBytes);
dml_print("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml_print("DML::%s: ExtraLatency=%f\n", __func__, ExtraLatency);
#endif
return ExtraLatency;
}
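/*
 * Bytes that contribute to extra latency: reordering bytes, pixel and meta
 * chunk buffering for all active DPPs, and, with GPUVM enabled, one dpte
 * group per DPP scaled by the host VM dynamic page-table levels and the
 * host VM inefficiency factor.
 */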
static double CalculateExtraLatencyBytes(
int ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ret;
int HostVMDynamicLevels = 0, k;
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048) {
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
} else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
} else {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
} else {
HostVMDynamicLevels = 0;
}
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k) {
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
}
return ret;
}
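/*
 * Urgent latency is the worst of the pixel-only, pixel-mixed-with-VM and
 * VM-only latencies, optionally increased when the fabric clock runs below
 * its reference.
 */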
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClock)
{
double ret;
ret = dml_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
if (DoUrgentLatencyAdjustment == true) {
ret = ret + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
}
return ret;
}
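/*
 * For each voltage state and DPP configuration, derive the lowest DCFCLK that
 * still meets average bandwidth, per-plane peak prefetch bandwidth and
 * dynamic-metadata deadlines, then apply a 5% margin and clamp to the state's
 * maximum DCFCLK.
 */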
static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes)
{
struct vba_vars_st *v = &mode_lib->vba;
int dummy1, i, j, k;
double NormalEfficiency, dummy2, dummy3;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2];
NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX];
double PrefetchPixelLinesTime[DC__NUM_DPP__MAX];
double DCFCLKRequiredForPeakBandwidthPerPlane[DC__NUM_DPP__MAX];
double DynamicMetadataVMExtraLatency[DC__NUM_DPP__MAX];
double MinimumTWait;
double NonDPTEBandwidth;
double DPTEBandwidth;
double DCFCLKRequiredForAverageBandwidth;
double ExtraLatencyBytes;
double ExtraLatencyCycles;
double DCFCLKRequiredForPeakBandwidth;
int NoOfDPPState[DC__NUM_DPP__MAX];
double MinimumTvmPlus2Tr0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
+ v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max3(
v->ProjectedDCFCLKDeepSleep[i][j],
(NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth
/ (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
(NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth);
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
v->TotalNumberOfActiveDPP[i][j],
v->PixelChunkSizeInKByte,
v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize,
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
NoOfDPPState,
v->dpte_group_bytes,
1,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch;
double ExpectedPrefetchBWAcceleration;
double PrefetchTime;
PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k]
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0)
+ 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth
+ 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k])
/ (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
DynamicMetadataVMExtraLatency[k] =
(v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait
- v->UrgLatency[i]
* ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2)
* (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
- DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch;
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k]
/ (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
+ NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
if (v->DynamicMetadataEnable[k] == true) {
double TSetupPipe;
double TdmbfPipe;
double TdmsksPipe;
double TdmecPipe;
double AllowedTimeForUrgentExtraLatency;
CalculateVupdateAndDynamicMetadataParameters(
v->MaxInterDCNTileRepeaters,
v->RequiredDPPCLK[i][j][k],
v->RequiredDISPCLK[i][j],
v->ProjectedDCFCLKDeepSleep[i][j],
v->PixelClock[k],
v->HTotal[k],
v->VTotal[k] - v->VActive[k],
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
&TSetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe,
&dummy1,
&dummy2,
&dummy3);
AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
- TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(
DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
MinimumTvmPlus2Tr0 = v->UrgLatency[i]
* (v->GPUVMEnable == true ?
(v->HostVMEnable == true ?
(v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) :
0);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw;
MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(
DCFCLKRequiredForPeakBandwidth,
2 * ExtraLatencyCycles / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
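/*
 * Decide whether unbounded requesting may be used and size the compressed
 * buffer from the return buffer space left after the DET allocation, scaled
 * by the compressed buffer segment size.
 */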
static void CalculateUnboundedRequestAndCompressedBufferSize(
unsigned int DETBufferSizeInKByte,
int ConfigReturnBufferSizeInKByte,
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
int TotalActiveDPP,
bool NoChromaPlanes,
int MaxNumDPP,
int CompressedBufferSegmentSizeInkByteFinal,
enum output_encoder_class *Output,
bool *UnboundedRequestEnabled,
int *CompressedBufferSizeInkByte)
{
double actDETBufferSizeInKByte = dml_ceil(DETBufferSizeInKByte, 64);
*UnboundedRequestEnabled = UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaPlanes, Output[0]);
*CompressedBufferSizeInkByte = (
*UnboundedRequestEnabled == true ?
ConfigReturnBufferSizeInKByte - TotalActiveDPP * actDETBufferSizeInKByte :
ConfigReturnBufferSizeInKByte - MaxNumDPP * actDETBufferSizeInKByte);
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByteFinal / 64;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: DETBufferSizeInKByte = %d\n", __func__, DETBufferSizeInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
dml_print("DML::%s: actDETBufferSizeInKByte = %f\n", __func__, actDETBufferSizeInKByte);
dml_print("DML::%s: UnboundedRequestEnabled = %d\n", __func__, *UnboundedRequestEnabled);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, *CompressedBufferSizeInkByte);
#endif
}
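/*
 * Unbounded requesting is only permitted for a single active DPP with no
 * chroma plane and, under the eDP-only policy, only when the output is eDP.
 */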
static bool UnboundedRequest(enum unbounded_requesting_policy UseUnboundedRequestingFinal, int TotalNumberOfActiveDPP, bool NoChroma, enum output_encoder_class Output)
{
bool ret_val = false;
ret_val = (UseUnboundedRequestingFinal != dm_unbounded_requesting_disable && TotalNumberOfActiveDPP == 1 && NoChroma);
if (UseUnboundedRequestingFinal == dm_unbounded_requesting_edp_only && Output != dm_edp) {
ret_val = false;
}
return (ret_val);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c |
/*
* Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dcn31/dcn31_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
/**
* DOC: DCN31x FPU manipulation Overview
*
* The DCN architecture relies on FPU operations, which require special
* compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
* want to avoid spreading FPU access across multiple files. With this idea in
* mind, this file aims to centralize all DCN3.1.x functions that require FPU
 * access in a single place. Code in this file adheres to the following
 * pattern:
 *
 * 1. Functions that use FPU operations should be isolated in static functions.
 * 2. The FPU functions should have the noinline attribute to ensure that
 * anything dealing with FP registers is contained within the call.
 * 3. Any function that needs to be accessed outside this file requires a
 * public interface that does not use any FPU reference.
 * 4. Developers **must not** use DC_FP_START/END in this file, but they need
 * to ensure that the caller invokes it before accessing any function available
 * in this file. For this reason, public functions in this file must invoke
 * dc_assert_fp_enabled();
 *
 * An illustrative sketch of this pattern follows this comment block.
*/
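/*
 * Illustrative sketch only (not part of the driver): a minimal example of the
 * pattern described above, using hypothetical function names. The static
 * noinline helper is the only place FP state is touched; the public entry
 * point carries no FPU code itself and merely asserts that the caller has
 * already wrapped it in DC_FP_START()/DC_FP_END().
 *
 *	static noinline double dcn31_example_fpu_math(double num, double den)
 *	{
 *		return den > 0.0 ? num / den : 0.0;
 *	}
 *
 *	double dcn31_example_public_entry(double num, double den)
 *	{
 *		dc_assert_fp_enabled();
 *		return dcn31_example_fpu_math(num, den);
 *	}
 */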
struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
.hostvm_max_page_table_levels = 2,
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE,
.config_return_buffer_size_in_kbytes = 1792,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 32,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 3,
.maximum_dsc_bits_per_component = 10,
.dsc422_native_support = false,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 48,
.line_buffer_size_bits = 789504,
.max_line_buffer_lines = 12,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 8,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 46,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
};
static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
.dtbclk_mhz = 625.0,
},
{
.state = 1,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
},
{
.state = 2,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
},
{
.state = 3,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
.dtbclk_mhz = 625.0,
},
{
.state = 4,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 625.0,
},
},
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 442.0,
.sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 60.0,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
.hostvm_max_page_table_levels = 2,
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE,
.min_comp_buffer_size_kbytes = 64,
.config_return_buffer_size_in_kbytes = 1024,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 32,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 3,
.maximum_dsc_bits_per_component = 10,
.dsc422_native_support = false,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 48,
.line_buffer_size_bits = 789504,
.max_line_buffer_lines = 12,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 9,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 46,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
};
static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
.sr_enter_plus_exit_z8_time_us = 50.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 60.0,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.38,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
.dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
.hostvm_max_page_table_levels = 2,
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE,
.min_comp_buffer_size_kbytes = 64,
.config_return_buffer_size_in_kbytes = 1024,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 32,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 3,
.maximum_dsc_bits_per_component = 10,
.dsc422_native_support = false,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 48,
.line_buffer_size_bits = 789504,
.max_line_buffer_lines = 12,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 8,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 46,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
};
static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 556.0,
.dppclk_mhz = 556.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 445.0,
.dscclk_mhz = 186.0,
.dtbclk_mhz = 625.0,
},
{
.state = 1,
.dispclk_mhz = 625.0,
.dppclk_mhz = 625.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
},
{
.state = 2,
.dispclk_mhz = 625.0,
.dppclk_mhz = 625.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
},
{
.state = 3,
.dispclk_mhz = 1112.0,
.dppclk_mhz = 1112.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
.dtbclk_mhz = 625.0,
},
{
.state = 4,
.dispclk_mhz = 1250.0,
.dppclk_mhz = 1250.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 625.0,
},
},
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 442.0,
.sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 60.0,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
dc_assert_fp_enabled();
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
dc_assert_fp_enabled();
if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
}
}
void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
dc_assert_fp_enabled();
if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
/* For DCN315, p-state change is only supported if it is possible in vactive */
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
else
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us =
dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
}
}
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
/* We don't recalculate clocks for 0-pipe configs, but leftover high
* clocks would block low power states such as S0i3.
* Override any clocks that can block S0i3 to their minimum here.
*/
if (pipe_cnt == 0) {
context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
return;
}
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
/* Set A:
* All clocks min required
*
* Set A calculated last so that following calculations are based on Set A
*/
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_hubp_count++;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_idx++;
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
/* For 31x APUs, p-state change is only supported if it is possible in vactive */
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
pipe_idx++;
}
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
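/*
 * Worked example (hypothetical pipe numbers) of the DET/compbuf split above:
 * with dcn3_1_ip.config_return_buffer_size_in_kbytes = 1792 and two active
 * pipes for which get_det_buffer_size_kbytes() reports 768 KB each, both
 * values exceed 384 and are halved to 384 KB, so total_det = 768 KB and
 * compbuf_size_kb = 1792 - 768 = 1024 KB.
 */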
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
int j;
dc_assert_fp_enabled();
memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
dcn3_1_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
/* Prepass to find max clocks independent of voltage level. */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
s[i].state = i;
/* Clocks dependent on voltage level. */
s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
s[i].dram_bw_per_chan_gbps =
dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
s[i].phyclk_d18_mhz =
dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_1_soc.num_states = clk_table->num_entries;
}
memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits));
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if ((int)(dcn3_1_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
}
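/*
 * Sketch of the backwards closest_clk_lvl search above, with hypothetical
 * numbers: if the SMU clock table entry reports dcfclk_mhz = 650 and the SoC
 * states carry dcfclk_mhz = {400, 600, 738, 900, 1200}, the loop starts at
 * state 4 and stops at state 1 (600 <= 650), so the voltage-independent
 * clocks (dscclk, dtbclk, phyclk, ...) are copied from state 1.
 */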
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
if (bw_params->num_channels > 0)
dcn3_15_soc.num_chans = bw_params->num_channels;
if (bw_params->dram_channel_width_bytes > 0)
dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
/* Setup soc to always use max dispclk/dppclk to avoid odm-to-lower-voltage */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++) {
dcn3_15_soc.clock_limits[i].state = i;
/* Clocks dependent on voltage level. */
dcn3_15_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn3_15_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn3_15_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
dcn3_15_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
/* These aren't actually read from smu, but rather set in clk_mgr defaults */
dcn3_15_soc.clock_limits[i].dtbclk_mhz = clk_table->entries[i].dtbclk_mhz;
dcn3_15_soc.clock_limits[i].phyclk_d18_mhz = clk_table->entries[i].phyclk_d18_mhz;
dcn3_15_soc.clock_limits[i].phyclk_mhz = clk_table->entries[i].phyclk_mhz;
/* Clocks independent of voltage level. */
dcn3_15_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_15_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_15_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3.0;
}
dcn3_15_soc.num_states = clk_table->num_entries;
/* Set vco to max_dispclk * 2 to make sure the highest dispclk is always available for dml calcs,
* no impact outside of dml validation
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
if ((int)(dcn3_15_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
}
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
int j;
dc_assert_fp_enabled();
memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
dcn3_16_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
/* Prepass to find max clocks independent of voltage level. */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
// Ported from DCN315
if (clk_table->num_entries == 1) {
/*smu gives one DPM level, let's take the highest one*/
closest_clk_lvl = dcn3_16_soc.num_states - 1;
}
s[i].state = i;
/* Clocks dependent on voltage level. */
s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
s[i].dcfclk_mhz <
dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
s[i].dcfclk_mhz =
dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
s[i].dram_bw_per_chan_gbps =
dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
s[i].phyclk_d18_mhz =
dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_16_soc.num_states = clk_table->num_entries;
}
memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits));
if (max_dispclk_mhz) {
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
if ((int)(dcn3_16_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
}
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
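/*
 * Worked example: with dcn3_1_soc state 0 (dispclk_mhz = 1200.0) and
 * dcn_downspread_percent = 0.5, the function returns
 * 1200 * 10000 / 1.005 ~= 11940298, i.e. a max non-ODM pixel rate of
 * roughly 1194.03 MHz (the return value is in units of 100 Hz).
 */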
int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
/ 10240000 + seg_size_kb - 1) / seg_size_kb;
}
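/*
 * Example of the rounding above (hypothetical numbers): if the latency-hiding
 * term works out to roughly 150 KB and seg_size_kb = 64, the expression
 * (150 + 64 - 1) / 64 = 3 returns three segments, i.e. a ceiling division so
 * that any partial segment still counts as a whole one.
 */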
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_12) || (source_format == dm_420_8) || (source_format == dm_420_10) || (source_format == dm_rgbe_alpha))
ret_val = true;
return ret_val;
}
static double get_refcyc_per_delivery(
struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
unsigned int odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) ((unsigned int) odm_combine * 2)
* dml_min((double) recout_width, (double) hactive / ((unsigned int) odm_combine * 2)) / pclk_freq_in_mhz / (double) req_per_swath_ub;
else
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width / pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width / (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: hscale_pixel_rate = %3.2f\n", __func__, hscale_pixel_rate);
dml_print("DML_DLG: %s: delivery_width = %d\n", __func__, delivery_width);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
#endif
return refcyc_per_delivery;
}
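/*
 * Worked example (hypothetical clocks) for the vratio <= 1, no-ODM branch
 * above: with refclk_freq_in_mhz = 50, pclk_freq_in_mhz = 100,
 * recout_width = 1920 and req_per_swath_ub = 30, the result is
 * 50 * 1920 / 100 / 30 = 32 refclk cycles per request delivery.
 */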
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing)
{
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
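/*
 * Worked example of the log2 encodings above, using the sizes picked later in
 * get_surf_rq_param(): chunk_bytes = 8192 -> chunk_size = 13 - 10 = 3,
 * min_chunk_bytes = 1024 -> min_chunk_size = 10 - 8 + 1 = 3,
 * meta_chunk_bytes = 2048 -> meta_chunk_size = 11 - 10 = 1,
 * min_meta_chunk_bytes = 256 -> min_meta_chunk_size = 8 - 6 + 1 = 3,
 * dpte/mpte_group_bytes = 2048 -> group_size = 11 - 6 = 5.
 */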
static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
// Note: detile_buf_plane1_addr is in unit of 1KB
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0; // 2/3 to luma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr);
dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address);
dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
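/*
 * Worked example of the plane1 base address split above, assuming a
 * hypothetical 192 KB detile buffer (196608 bytes): if the stored luma/chroma
 * swath ratio is <= 1.5 the buffer is split in half and
 * detile_buf_plane1_addr = 196608 / 2 / 1024 = 96 (1 KB units); otherwise the
 * 2/3-to-luma split gives round_to_multiple(131072, 1024) / 1024 = 128.
 */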
static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n", __func__, full_swath_bytes_packed_c);
#endif
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2.0 / 3.0, 256, 1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2.0 / 3.0, 256, 1) + 256;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d (3-2 packing)\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d (3-2 packing)\n", __func__, full_swath_bytes_packed_c);
#endif
}
if (rq_param->yuv420)
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
else
total_swath_bytes = 2 * full_swath_bytes_packed_l;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: total_swath_bytes = %0d\n", __func__, total_swath_bytes);
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
#endif
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = 0;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else if (!rq_param->yuv420) {
req128_l = 1;
req128_c = 0;
swath_bytes_c = full_swath_bytes_packed_c;
swath_bytes_l = full_swath_bytes_packed_l / 2;
} else if ((double) full_swath_bytes_packed_l / (double) full_swath_bytes_packed_c < 1.5) {
req128_l = 0;
req128_c = 1;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c / 2;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_l = 1;
swath_bytes_l = full_swath_bytes_packed_l / 2;
}
} else {
req128_l = 1;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_c = 1;
swath_bytes_c = full_swath_bytes_packed_c / 2;
}
}
if (rq_param->yuv420)
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
else
total_swath_bytes = 2 * swath_bytes_l;
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: total_swath_bytes = %0d\n", __func__, total_swath_bytes);
dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
#endif
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n", __func__, full_swath_bytes_packed_c);
dml_print("DML_DLG: %s: swath_height luma = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
dml_print("DML_DLG: %s: swath_height chroma = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
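/*
 * Worked example of the split above (hypothetical numbers, non-4:2:0 case,
 * 192 KB detile buffer): with full_swath_bytes_packed_l = 122880,
 * total_swath_bytes = 2 * 122880 = 245760 > 196608, so req128_l = 1 and
 * swath_bytes_l = 61440. With a hypothetical blk256 height of 8 lines,
 * log2_swath_height_l = 3 - 1 = 2, i.e. a stored swath height of 4 lines.
 */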
static void get_meta_and_pte_attr(
struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int hostvm_enable,
unsigned int is_chroma,
unsigned int surface_height)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element;
unsigned int bytes_per_element_y;
unsigned int bytes_per_element_c;
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_height;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.gpuvm_min_page_size_bytes);
const bool dual_plane_en = is_dual_plane((enum source_format_class) (source_format));
const unsigned int dpte_buf_in_pte_reqs =
dual_plane_en ? (is_chroma ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma) : (mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma
+ mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
double byte_per_pixel_det_y;
double byte_per_pixel_det_c;
dml30_CalculateBytePerPixelAnd256BBlockSizes(
(enum source_format_class) (source_format),
(enum dm_swizzle_mode) (tiling),
&bytes_per_element_y,
&bytes_per_element_c,
&byte_per_pixel_det_y,
&byte_per_pixel_det_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
// "-" in log is divide
// "/2" is like square root
// blk is vertical biased
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
if (!surf_vert) {
unsigned int temp;
temp = dml_round_to_multiple(vp_width - 1, blk256_width, 1) + blk256_width;
if (data_pitch < blk256_width) {
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < blk256_width=%u\n", __func__, data_pitch, blk256_width);
} else {
if (temp > data_pitch) {
if (data_pitch >= vp_width)
temp = data_pitch;
else
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < vp_width=%u\n", __func__, data_pitch, vp_width);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_width;
} else {
unsigned int temp;
temp = dml_round_to_multiple(vp_height - 1, blk256_height, 1) + blk256_height;
if (surface_height < blk256_height) {
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < blk256_height=%u\n", __func__, surface_height, blk256_height);
} else {
if (temp > surface_height) {
if (surface_height >= vp_height)
temp = surface_height;
else
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < vp_height=%u\n", __func__, surface_height, vp_height);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height * bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width * bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
// each 64b meta request for dcn is 8x8 meta elements and
// a meta element covers one 256b block of the data surface.
log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1) + meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1) + meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element - log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1 << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element - log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_height = blk256_height * 64;
meta_surface_bytes = meta_pitch * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height) * bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.gpuvm_min_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes, 8 * vmpg_bytes, 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n", __func__, meta_pte_req_per_frame_ub);
dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n", __func__, meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear) {
log2_vmpg_height = 0; // one line high
} else {
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
}
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
// The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
// log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
if (surf_linear) {
unsigned int dpte_row_height;
log2_dpte_row_height_linear = dml_floor(dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch), 1);
dml_print("DML_DLG: %s: is_chroma = %d\n", __func__, is_chroma);
dml_print("DML_DLG: %s: dpte_buf_in_pte_reqs = %d\n", __func__, dpte_buf_in_pte_reqs);
dml_print("DML_DLG: %s: log2_dpte_row_height_linear = %d\n", __func__, log2_dpte_row_height_linear);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1, dpte_req_width, 1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
// The upper bound of dpte_row_width without dependency on viewport position follows.
// For tiled mode, the row height is the same as the request height and the row stores up to the viewport-size upper bound.
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height = (log2_blk_width < log2_dpte_req_width) ? log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1) + dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced; in this case a page fault could occur within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
}
//since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width, 1);
}
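/*
 * Worked example of the meta request geometry above, assuming a hypothetical
 * 16x4-element 256B block at 4 bytes/element: log2_meta_req_height = 2 + 3 = 5
 * (32 rows) and log2_meta_req_width = 6 + 8 - 2 - 5 = 7 (128 elements), so a
 * single 64B meta request (8x8 meta elements, one per 256B block) covers a
 * 128x32-element region of the data surface.
 */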
static void get_surf_rq_param(
struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
unsigned int ppe = mode_422 ? 2 : 1;
// FIXME: check if ppe applies to both luma and chroma in the 422 case
if (is_chroma || is_alpha) {
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
surface_height = pipe_param->src.surface_height_y;
}
if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_odm;
unsigned int src_hactive_odm;
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2);
if (is_chroma) {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
vp_width = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
} else {
vp_height = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
}
dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
dml_print("DML_DLG: %s: hactive_odm = %d\n", __func__, hactive_odm);
dml_print("DML_DLG: %s: src_hactive_odm = %d\n", __func__, src_hactive_odm);
}
rq_sizing_param->chunk_bytes = 8192;
if (is_alpha) {
rq_sizing_param->chunk_bytes = 4096;
}
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(
mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma,
surface_height);
}
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha
|| pipe_param->src.source_format == dm_420_12;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0;
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0);
if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
print__rq_params_st(mode_lib, rq_param);
}
void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq / (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz * (double) cur_src_width / hscale_pixel_rate_l / (double) cur_req_per_width;
}
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0) {
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq / (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz * (double) cur_src_width / hscale_pixel_rate_l / (double) cur_req_per_width;
}
dml_print("DML_DLG: %s: cur_req_width = %d\n", __func__, cur_req_width);
dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n", __func__, cur_width_ub);
dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n", __func__, cur_req_per_width);
dml_print("DML_DLG: %s: hactive_cur = %3.2f\n", __func__, hactive_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n", __func__, *refcyc_per_req_delivery_pre_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n", __func__, *refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
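/*
 * Illustrative sketch (not part of the original DML code): how the cursor
 * request size and per-request delivery time in calculate_ttu_cursor() fall
 * out, using plain arithmetic instead of the dml_* helpers and covering only
 * the vratio <= 1 branch. The example_* name is hypothetical.
 */
static inline double example_cursor_refcyc_per_req(
	unsigned int cur_src_width,	/* cursor width in pixels, <= 256 */
	unsigned int cur_bit_per_pixel,	/* 2 or 32, as in calculate_ttu_cursor */
	double hscl_ratio,
	double ref_freq_to_pix_freq)
{
	unsigned int cur_req_size;
	unsigned int cur_req_width_px;
	unsigned int num_reqs;
	double hactive_cur;

	/* request size selection mirrors the 2bpp/32bpp branches above */
	if (cur_bit_per_pixel == 2)
		cur_req_size = 64;
	else if (cur_src_width <= 16)
		cur_req_size = 64;
	else if (cur_src_width <= 31)
		cur_req_size = 128;
	else
		cur_req_size = 256;

	/* request width in pixels: bytes per request / bytes per pixel */
	cur_req_width_px = cur_req_size * 8 / cur_bit_per_pixel;
	/* number of requests needed to cover the cursor width, rounded up */
	num_reqs = (cur_src_width + cur_req_width_px - 1) / cur_req_width_px;
	/* time (in pixel clocks) the cursor occupies after horizontal scaling */
	hactive_cur = (double) cur_src_width / hscl_ratio;

	/* vratio <= 1 case: spread the requests evenly across that window */
	return hactive_cur * ref_freq_to_pix_freq / (double) num_reqs;
}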
// Note: currently taken in as-is.
// It would be nice to decouple this code from the HW register implementation and factor out the logic that is repeated for luma and chroma.
static void dml_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX];
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
// Scaling
unsigned int htaps_l;
unsigned int htaps_c;
double hratio_l;
double hratio_c;
double vratio_l;
double vratio_c;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
unsigned int swath_width_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int meta_chunks_per_row_ub_c;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double max_dst_y_per_vm_vblank;
double max_dst_y_per_row_vblank;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int meta_row_height_c;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
double refcyc_per_req_delivery_cur1;
unsigned int vba__min_dst_y_next_start = get_min_dst_y_next_start(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
unsigned int vba__vready_after_vcount0 = get_vready_at_or_after_vsync(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
float vba__refcyc_per_line_delivery_pre_l = get_refcyc_per_line_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_line_delivery_l = get_refcyc_per_line_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced); ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal * dml_pow(2, 8));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
//set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
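/*
 * Worked example (illustrative): with dlg_vblank_start = 1000 lines, the
 * register value is 1000 * 2^2 = 4000 (0xFA0), i.e. the start line in
 * quarter-line units, which is what the < 2^18 check below bounds.
 */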
disp_dlg_regs->min_dst_y_next_start_us = 0;
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_next_start = 0x%0x\n", __func__, disp_dlg_regs->min_dst_y_next_start);
dml_print("DML_DLG: %s: dlg_vblank_start = 0x%0x\n", __func__, dlg_vblank_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
dml_print("DML_DLG: %s: vba__min_dst_y_next_start = 0x%0x\n", __func__, vba__min_dst_y_next_start);
//old_impl_vs_vba_impl("min_dst_y_next_start", dlg_vblank_start, vba__min_dst_y_next_start);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
mode_422 = 0;
access_dir = (src->source_scan == dm_vert); // vp access direction: 0 = horizontal, 1 = vertical
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0 - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal <= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal <= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
dml_print("DML_DLG: %s: vba__vready_after_vcount0 = %d\n", __func__, vba__vready_after_vcount0);
//old_impl_vs_vba_impl("vready_after_vcount0", disp_dlg_regs->vready_after_vcount0, vba__vready_after_vcount0);
if (interlaced)
vstartup_start = vstartup_start / 2;
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
// do some adjustment on dst_x/y_after_scaler to account for ODM combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler);
dml_print("DML_DLG: %s: input dst_y_after_scaler = %d\n", __func__, dst_y_after_scaler);
// need to figure out which side of odm combine we're in
if (dst->odm_combine) {
// figure out which pipes go together
bool visited[DC__NUM_PIPES__MAX];
unsigned int i, j, k;
for (k = 0; k < num_pipes; ++k) {
visited[k] = false;
pipe_index_in_combine[k] = 0;
}
for (i = 0; i < num_pipes; i++) {
if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
unsigned int grp_idx = 0;
for (j = i; j < num_pipes; j++) {
if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp && e2e_pipe_param[j].pipe.src.is_hsplit && !visited[j]) {
pipe_index_in_combine[j] = grp_idx;
dml_print("DML_DLG: %s: pipe[%d] is in grp %d idx %d\n", __func__, j, grp, grp_idx);
grp_idx++;
visited[j] = true;
}
}
}
}
}
if (dst->odm_combine == dm_odm_combine_mode_disabled) {
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end * ref_freq_to_pix_freq);
} else {
unsigned int odm_combine_factor = (dst->odm_combine == dm_odm_combine_mode_2to1 ? 2 : 4); // TODO: We should really check that 4to1 is supported before setting it to 4
unsigned int odm_pipe_index = pipe_index_in_combine[pipe_idx];
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) (((double) hblank_end + odm_pipe_index * (double) dst->hactive / odm_combine_factor) * ref_freq_to_pix_freq);
}
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print("DML_DLG: %s: dst_x_after_scaler[%d] = %d\n", __func__, pipe_idx, dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler[%d] = %d\n", __func__, pipe_idx, dst_y_after_scaler);
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
max_dst_y_per_vm_vblank = 32.0; //U5.2
max_dst_y_per_row_vblank = 16.0; //U4.2
// magic!
if (htotal <= 75) {
max_dst_y_per_vm_vblank = 100.0;
max_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dml_print("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixels per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
dml_print("DML_DLG: %s: hratio_l = %3.2f\n", __func__, hratio_l);
dml_print("DML_DLG: %s: min_hratio_fact_l = %3.2f\n", __func__, min_hratio_fact_l);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n", __func__, hscale_pixel_rate_l);
if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// In ODM
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n", __func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n", __func__, full_recout_width);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n", __func__, hscale_pixel_rate_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, refcyc_per_line_delivery_l);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, vba__refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_l = %3.2f\n", __func__, vba__refcyc_per_line_delivery_l);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_pre_l", refcyc_per_line_delivery_pre_l, vba__refcyc_per_line_delivery_pre_l);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_l", refcyc_per_line_delivery_l, vba__refcyc_per_line_delivery_l);
if (dual_plane) {
float vba__refcyc_per_line_delivery_pre_c = get_refcyc_per_line_delivery_pre_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_line_delivery_c = get_refcyc_per_line_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, refcyc_per_line_delivery_c);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, vba__refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_c = %3.2f\n", __func__, vba__refcyc_per_line_delivery_c);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_pre_c", refcyc_per_line_delivery_pre_c, vba__refcyc_per_line_delivery_pre_c);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_c", refcyc_per_line_delivery_c, vba__refcyc_per_line_delivery_c);
}
if (src->dynamic_metadata_enable && src->gpuvm)
disp_dlg_regs->refcyc_per_vm_dmdata = get_refcyc_per_vm_dmdata_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->dmdata_dl_delta = get_dmdata_dl_delta_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, refcyc_per_req_delivery_l);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_l = %3.2f\n", __func__, vba__refcyc_per_req_delivery_l);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_l", refcyc_per_req_delivery_pre_l, vba__refcyc_per_req_delivery_pre_l);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_l", refcyc_per_req_delivery_l, vba__refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
float vba__refcyc_per_req_delivery_pre_c = get_refcyc_per_req_delivery_pre_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_c = get_refcyc_per_req_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, refcyc_per_req_delivery_c);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_c = %3.2f\n", __func__, vba__refcyc_per_req_delivery_c);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_c", refcyc_per_req_delivery_pre_c, vba__refcyc_per_req_delivery_pre_c);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_c", refcyc_per_req_delivery_c, vba__refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
ASSERT(src->num_cursors <= 1);
if (src->num_cursors > 0) {
float vba__refcyc_per_req_delivery_pre_cur0;
float vba__refcyc_per_req_delivery_cur0;
calculate_ttu_cursor(
mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp) (src->cur0_bpp));
vba__refcyc_per_req_delivery_pre_cur0 = get_refcyc_per_cursor_req_delivery_pre_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
vba__refcyc_per_req_delivery_cur0 = get_refcyc_per_cursor_req_delivery_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_pre_cur0);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_cur0);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_cur0 = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_cur0);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_cur0 = %3.2f\n", __func__, vba__refcyc_per_req_delivery_cur0);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_cur0", refcyc_per_req_delivery_pre_cur0, vba__refcyc_per_req_delivery_pre_cur0);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_cur0", refcyc_per_req_delivery_cur0, vba__refcyc_per_req_delivery_cur0);
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
// TTU - Misc
// all hard-coded
// Assignment to register structures
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)dml_pow(2, 13));
}
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // DCC for 4:2:0 is not supported in DCN1.0, so assign the same value as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l / (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c / (double) vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
dml_print(
"DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int) dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l / (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = (unsigned int) ((double) meta_row_height_c / (double) vratio_c * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_c < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (unsigned int) ((double) meta_row_height_c / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l, 1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c, 1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 = (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1 * dml_pow(2, 10));
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal * ref_freq_to_pix_freq);
ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
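/*
 * Illustrative sketch (not part of the original DML code): the register
 * assignments in dml_rq_dlg_get_dlg_params() all pack a double into an
 * unsigned fixed-point field by scaling with a power of two (2^19 for
 * ratios, 2^10 for the per-request TTU values, 2^8 for refcyc_per_htotal,
 * 2^2 for line counts). A hypothetical helper makes the pattern explicit;
 * the driver keeps the multiplications inline.
 */
static inline unsigned int example_pack_fixed_point(double value, unsigned int frac_bits)
{
	/* truncate toward zero, exactly like the inline (unsigned int) casts above */
	return (unsigned int) (value * (double) (1u << frac_bits));
}

/*
 * Usage sketch: example_pack_fixed_point(vratio_pre_l, 19) corresponds to the
 * vratio_prefetch assignment, and example_pack_fixed_point(dst_y_prefetch, 2)
 * to the dst_y_prefetch assignment.
 */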
void dml31_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
// Get watermark and Tex.
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
&rq_param.dlg,
&dlg_sys_param,
cstate_en,
pstate_en,
vm_en,
ignore_viewport_pos,
immediate_flip_support);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
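/*
 * Usage sketch (hypothetical caller, assuming mode_lib and the e2e pipe
 * array are already populated): dml31_rq_dlg_get_dlg_reg() is the entry
 * point that combines the RQ parameter derivation with the DLG/TTU register
 * calculation for one pipe. The boolean choices here are arbitrary example
 * values.
 */
static inline void example_program_pipe_dlg(struct display_mode_lib *mode_lib,
	const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int pipe_idx)
{
	display_dlg_regs_st dlg_regs;
	display_ttu_regs_st ttu_regs;

	dml31_rq_dlg_get_dlg_reg(mode_lib, &dlg_regs, &ttu_regs, pipes, num_pipes,
		pipe_idx, true, true, true, false, true);
	/* dlg_regs/ttu_regs would then be written to HUBP by the caller */
}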
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_30.h"
#include "../dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parsable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
double DCFCLKDeepSleep;
unsigned int DPPPerPlane;
bool ScalerEnabled;
enum scan_direction_class SourceScan;
unsigned int BlockWidth256BytesY;
unsigned int BlockHeight256BytesY;
unsigned int BlockWidth256BytesC;
unsigned int BlockHeight256BytesC;
unsigned int InterlaceEnable;
unsigned int NumberOfCursors;
unsigned int VBlank;
unsigned int HTotal;
unsigned int DCCEnable;
bool ODMCombineEnabled;
} Pipe;
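/*
 * Illustrative sketch (not part of the original DML code): a hypothetical
 * Pipe instance built with designated initializers, just to show the shape
 * of the per-pipe state that CalculatePrefetchSchedule() consumes. The
 * numbers are made up for the example; fields not listed are left zero.
 */
static inline Pipe example_pipe_1080p60(void)
{
	Pipe p = {
		.DPPCLK = 600.0,		/* MHz */
		.DISPCLK = 600.0,		/* MHz */
		.PixelClock = 148.5,		/* MHz, 1080p60 CEA timing */
		.DCFCLKDeepSleep = 8.0,		/* MHz */
		.DPPPerPlane = 1,
		.ScalerEnabled = false,
		.SourceScan = dm_horz,
		.InterlaceEnable = 0,
		.NumberOfCursors = 1,
		.VBlank = 45,
		.HTotal = 2200,
		.DCCEnable = 0,
		.ODMCombineEnabled = false,
	};
	return p;
}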
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
#define DCN30_MAX_FMT_420_BUFFER_WIDTH 4096
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output);
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
// Super monster function with some 45 arguments
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
int BytePerPixelY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
long swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int ViewportWidthLuma,
unsigned int ViewportWidthChroma,
unsigned int ViewportHeightLuma,
unsigned int ViewportHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame);
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatency,
double SREnterPlusExitTime);
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
long WritebackDestinationWidth,
long WritebackDestinationHeight,
long WritebackSourceHeight,
unsigned int HTotal);
static void CalculateDynamicMetadataParameters(
int MaxInterDCNTileRepeaters,
double DPPCLK,
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
long HTotal,
long VBlank,
long DynamicMetadataTransmittedBytes,
long DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks);
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep);
static void CalculateUrgentBurstFactor(
long swath_width_luma_ub,
long swath_width_chroma_ub,
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding);
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
struct vba_vars_st *v,
int MaxPrefetchMode,
int ReorderingBytes);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][2],
unsigned int CursorBPP[][2],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[]);
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[]);
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[]);
static void CalculateStutterEfficiency(
int NumberOfActivePlanes,
long ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double SRExitTime,
bool SynchronizedVBlank,
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double DCCRateLuma[],
double DCCRateChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
double *StutterPeriodOut);
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport);
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
unsigned int ViewportWidth[],
unsigned int ViewportHeight[],
unsigned int SurfaceWidthY[],
unsigned int SurfaceWidthC[],
unsigned int SurfaceHeightY[],
unsigned int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
unsigned int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[]);
static double CalculateExtraLatency(
long RoundTripPingLatencyCycles,
long ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateExtraLatencyBytes(
long ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClockSingle);
void dml30_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
DisplayPipeConfiguration(mode_lib);
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, P, l0, a, ax, L,
Delay, pixels;
if (pixelFormat == dm_420)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else if (pixelFormat == dm_444)
pixelsPerClock = 1;
else if (pixelFormat == dm_n422)
pixelsPerClock = 2;
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / BPP / pixelsPerClock);
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_420 || pixelFormat == dm_444 || pixelFormat == dm_n422)
s = 0;
else
s = 1;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
P = 3 * wx - w;
l0 = ix / w;
a = ix + P * l0;
ax = (a + 2) / 3 + D + 6 + 1;
L = (ax + wx - 1) / wx;
if ((ix % w) == 0 && P != 0)
lstall = 1;
else
lstall = 0;
Delay = L * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
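/*
 * dscComputeDelay(): fixed delays through the DSC wrapper (sfr, dsccif,
 * deserializer, CDC FIFOs, serializer, sft) per pixel format. The
 * per-stage constants below sum to 48 for 4:2:0, 49 for native 4:2:2 and
 * 30 otherwise.
 */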
static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
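/*
 * CalculatePrefetchSchedule(): derives the prefetch schedule for one pipe -
 * destination lines for prefetch, time budgeted to the VM (Tvm) and row
 * (Tr0) fetch phases, the prefetch bandwidth and the VRatio to use during
 * prefetch. Returns true if no valid schedule could be found.
 */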
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
int BytePerPixelY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
long swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
double LineTime = 0, Tsetup = 0;
double dst_y_prefetch_equ = 0;
double Tsw_oto = 0;
double prefetch_bw_oto = 0;
double Tvm_oto = 0;
double Tr0_oto = 0;
double Tvm_oto_lines = 0;
double Tr0_oto_lines = 0;
double dst_y_prefetch_oto = 0;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
double HostVMInefficiencyFactor = 0;
unsigned int HostVMDynamicLevelsTrips = 0;
double trip_to_mem = 0;
double Tvm_trips = 0;
double Tr0_trips = 0;
double Tvm_trips_rounded = 0;
double Tr0_trips_rounded = 0;
double Lsw_oto = 0;
double Tpre_rounded = 0;
double prefetch_bw_equ = 0;
double Tvm_equ = 0;
double Tr0_equ = 0;
double Tdmbf = 0;
double Tdmec = 0;
double Tdmsks = 0;
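/*
 * With both GPUVM and HostVM enabled, VM-only traffic sees a lower return
 * efficiency than pixel traffic mixed with VM data; the ratio of the two
 * efficiencies inflates the PDE/meta-PTE and PTE-row byte counts below,
 * and the trip count scales with both the GPUVM page table depth and the
 * non-cached host VM levels.
 */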
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
DynamicMetadataTransmittedBytes,
DynamicMetadataLinesBeforeActiveRequired,
myPipe->InterlaceEnable,
ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
&Tdmsks);
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
} else {
*Tdmdl = TWait + UrgentExtraLatency;
}
if (DynamicMetadataEnable == true) {
if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
dml_print("DML: Not Enough Time for Dynamic Meta!\n");
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
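/*
 * DSTXAfterScaler/DSTYAfterScaler: pixel pipeline and buffering delay after
 * the scaler, expressed in pixels and then split into whole lines plus a
 * remainder. It accounts for DPP and DISPCLK pipeline depth, DSC delay,
 * ODM combine and the recout width of the additional DPPs on the plane.
 */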
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
MyError = false;
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
} else
*Tno_bw = 0;
} else if (!myPipe->DCCEnable)
*Tno_bw = LineTime;
else
*Tno_bw = LineTime / 4;
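/*
 * Prefetch lines available by the "equ" schedule: lines between vstartup
 * and the start of active, minus setup/wait/calc/dmd time and the
 * post-scaler pipeline delay computed above.
 */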
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
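/*
 * The "_oto" variables form an alternative schedule in which the swath
 * fetch spans max(PrefetchSourceLinesY, PrefetchSourceLinesC) lines; the
 * code later keeps whichever of the _oto and _equ schedules needs fewer
 * destination lines.
 */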
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
if (GPUVMEnable == true) {
Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
} else
Tr0_oto = (LineTime - Tvm_oto) / 2.0;
Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
dml_print("DML: dst_y_prefetch_oto: %f\n", dst_y_prefetch_oto);
dml_print("DML: dst_y_prefetch_equ: %f\n", dst_y_prefetch_equ);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: Tvstartup: %fus - time between vstartup and first pixel of active\n", VStartup * LineTime);
dml_print("DML: Tsetup: %fus - time from vstartup to vready\n", Tsetup);
dml_print("DML: TCalc: %fus - time for calculations in dchub starting at vready\n", TCalc);
dml_print("DML: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", TWait);
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1 = 0;
double PrefetchBandwidth2 = 0;
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
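/*
 * Four candidate prefetch bandwidths: BW1 fits VM, row and pixel data in
 * Tpre minus Tno_bw; BW2 reserves the row phase at its trip-time floor;
 * BW3 reserves the VM phase at its trip-time floor; BW4 reserves both.
 */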
if (Tpre_rounded - *Tno_bw > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
/ (Tpre_rounded - *Tno_bw);
else
PrefetchBandwidth1 = 0;
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
}
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
BytePerPixelC) /
(Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
if (Tpre_rounded - Tvm_trips_rounded > 0)
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup && (PrefetchBandwidth3 > 4 * prefetch_bw_oto) && Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - Tvm_trips_rounded);
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
{
bool Case1OK;
bool Case2OK;
bool Case3OK;
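/*
 * A candidate is usable only if the VM and row fetch times it implies are
 * consistent with whether those phases were floored to their trip times
 * when that candidate was derived; otherwise fall through to BW4.
 */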
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
}
} else {
Case1OK = false;
}
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
}
} else {
Case2OK = false;
}
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
}
} else {
Case3OK = false;
}
if (Case1OK) {
prefetch_bw_equ = PrefetchBandwidth1;
} else if (Case2OK) {
prefetch_bw_equ = PrefetchBandwidth2;
} else if (Case3OK) {
prefetch_bw_equ = PrefetchBandwidth3;
} else {
prefetch_bw_equ = PrefetchBandwidth4;
}
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
(LineTime - Tvm_equ) / 2,
LineTime / 4);
} else {
Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
Tvm_equ = 0;
Tr0_equ = 0;
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
}
}
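/* Commit whichever schedule needs fewer destination lines. */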
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank
- 2 * *DestinationLinesToRequestRowInVBlank;
if (LinesToRequestPrefetchPixelData > 0 && prefetch_bw_equ > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY = dml_max((double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY / (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchY = 0;
}
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
if ((SwathHeightC > 4)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC = dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC / (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchC = 0;
}
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
dml_print("DML: LinesToRequestPrefetchPixelData: %f, should be > 0\n", LinesToRequestPrefetchPixelData);
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
dml_print("DML: Tpre: %fus - sum of tim to request meta pte, 2 x data pte + meta data, swaths\n", (double)LinesToRequestPrefetchPixelData * LineTime + 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
{
double prefetch_vm_bw = 0;
double prefetch_row_bw = 0;
if (PDEAndMetaPTEBytesFrame == 0) {
prefetch_vm_bw = 0;
} else if (*DestinationLinesToRequestVMInVBlank > 0) {
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
} else {
prefetch_vm_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
if (MetaRowByte + PixelPTEBytesPerRow == 0) {
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
} else {
prefetch_row_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
return MyError;
}
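/*
 * The DFS can only produce clocks of the form VCOSpeed * 4 / N for integer
 * N, so round to the nearest achievable frequency at or above (Up) or at
 * or below (Down) the requested clock. For example, with a hypothetical
 * VCOSpeed of 3600 and Clock of 1000: Up gives 14400 / 14 = ~1028.6,
 * Down gives 14400 / 15 = 960.
 */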
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4.0 / Clock, 1);
}
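/*
 * CalculateDCCConfiguration(): picks the DCC max uncompressed/compressed
 * block sizes and independent block size for luma and chroma. The choice
 * between 256B and 128B requests depends on whether full swaths fit in the
 * detile buffer and, for 128B requests, on the segment_order_*_contiguous
 * classification derived from the tiling format and bytes per pixel.
 */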
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
int yuv420 = 0;
int horz_div_l = 0;
int horz_div_c = 0;
int vert_div_l = 0;
int vert_div_c = 0;
int req128_horz_wc_l = 0;
int req128_horz_wc_c = 0;
int req128_vert_wc_l = 0;
int req128_vert_wc_c = 0;
int segment_order_horz_contiguous_luma = 0;
int segment_order_horz_contiguous_chroma = 0;
int segment_order_vert_contiguous_luma = 0;
int segment_order_vert_contiguous_chroma = 0;
long full_swath_bytes_horz_wc_l = 0;
long full_swath_bytes_horz_wc_c = 0;
long full_swath_bytes_vert_wc_l = 0;
long full_swath_bytes_vert_wc_c = 0;
long swath_buf_size = 0;
double detile_buf_vp_horz_limit = 0;
double detile_buf_vp_vert_limit = 0;
long MAS_vp_horz_limit = 0;
long MAS_vp_vert_limit = 0;
long max_vp_horz_width = 0;
long max_vp_vert_height = 0;
long eff_surf_width_l = 0;
long eff_surf_width_c = 0;
long eff_surf_height_l = 0;
long eff_surf_height_c = 0;
typedef enum {
REQ_256Bytes,
REQ_128BytesNonContiguous,
REQ_128BytesContiguous,
REQ_NA
} RequestType;
RequestType RequestLuma;
RequestType RequestChroma;
yuv420 = ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12) ? 1 : 0);
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
vert_div_c = 1;
if (BytePerPixelY == 1)
vert_div_l = 0;
if (BytePerPixelC == 1)
vert_div_c = 0;
if (BytePerPixelY == 8
&& (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t
|| TilingFormat == dm_sw_64kb_s_x))
horz_div_l = 0;
if (BytePerPixelC == 8
&& (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t
|| TilingFormat == dm_sw_64kb_s_x))
horz_div_c = 0;
if (BytePerPixelC == 0) {
swath_buf_size = DETBufferSize / 2 - 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size
/ ((double) RequestHeight256ByteLuma * BytePerPixelY
/ (1 + horz_div_l));
detile_buf_vp_vert_limit = (double) swath_buf_size
/ (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
} else {
swath_buf_size = DETBufferSize / 2 - 2 * 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size
/ ((double) RequestHeight256ByteLuma * BytePerPixelY
/ (1 + horz_div_l)
+ (double) RequestHeight256ByteChroma
* BytePerPixelC / (1 + horz_div_c)
/ (1 + yuv420));
detile_buf_vp_vert_limit = (double) swath_buf_size
/ (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l)
+ 256.0 / RequestHeight256ByteChroma
/ (1 + vert_div_c) / (1 + yuv420));
}
if (SourcePixelFormat == dm_420_10) {
detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
}
detile_buf_vp_horz_limit = dml_floor(detile_buf_vp_horz_limit - 1, 16);
detile_buf_vp_vert_limit = dml_floor(detile_buf_vp_vert_limit - 1, 16);
MAS_vp_horz_limit = 5760;
MAS_vp_vert_limit = (BytePerPixelC > 0 ? 2880 : 5760);
max_vp_horz_width = dml_min((double) MAS_vp_horz_limit, detile_buf_vp_horz_limit);
max_vp_vert_height = dml_min((double) MAS_vp_vert_limit, detile_buf_vp_vert_limit);
eff_surf_width_l =
(SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
eff_surf_height_l = (
SurfaceHeightLuma > max_vp_vert_height ?
max_vp_vert_height : SurfaceHeightLuma);
eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
if (BytePerPixelC > 0) {
full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma
* BytePerPixelC;
full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
} else {
full_swath_bytes_horz_wc_c = 0;
full_swath_bytes_vert_wc_c = 0;
}
if (SourcePixelFormat == dm_420_10) {
full_swath_bytes_horz_wc_l = dml_ceil(full_swath_bytes_horz_wc_l * 2 / 3, 256);
full_swath_bytes_horz_wc_c = dml_ceil(full_swath_bytes_horz_wc_c * 2 / 3, 256);
full_swath_bytes_vert_wc_l = dml_ceil(full_swath_bytes_vert_wc_l * 2 / 3, 256);
full_swath_bytes_vert_wc_c = dml_ceil(full_swath_bytes_vert_wc_c * 2 / 3, 256);
}
if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 0;
} else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c
&& 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c
<= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 1;
} else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c
&& full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c
<= DETBufferSize) {
req128_horz_wc_l = 1;
req128_horz_wc_c = 0;
} else {
req128_horz_wc_l = 1;
req128_horz_wc_c = 1;
}
if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 0;
} else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c
&& 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c
<= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 1;
} else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c
&& full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c
<= DETBufferSize) {
req128_vert_wc_l = 1;
req128_vert_wc_c = 0;
} else {
req128_vert_wc_l = 1;
req128_vert_wc_c = 1;
}
if (BytePerPixelY == 2 || (BytePerPixelY == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_luma = 0;
} else {
segment_order_horz_contiguous_luma = 1;
}
if ((BytePerPixelY == 8
&& (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x
|| TilingFormat == dm_sw_64kb_d_t
|| TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelY == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_luma = 0;
} else {
segment_order_vert_contiguous_luma = 1;
}
if (BytePerPixelC == 2 || (BytePerPixelC == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_chroma = 0;
} else {
segment_order_horz_contiguous_chroma = 1;
}
if ((BytePerPixelC == 8
&& (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x
|| TilingFormat == dm_sw_64kb_d_t
|| TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelC == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_chroma = 0;
} else {
segment_order_vert_contiguous_chroma = 1;
}
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0)
|| (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0)
|| (req128_vert_wc_c == 1
&& segment_order_vert_contiguous_chroma == 0)) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else if (ScanOrientation != dm_vert) {
if (req128_horz_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else {
if (req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
}
if (RequestLuma == REQ_256Bytes) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 256;
*IndependentBlockLuma = 0;
} else if (RequestLuma == REQ_128BytesContiguous) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 128;
*IndependentBlockLuma = 128;
} else {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 64;
*IndependentBlockLuma = 64;
}
if (RequestChroma == REQ_256Bytes) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 256;
*IndependentBlockChroma = 0;
} else if (RequestChroma == REQ_128BytesContiguous) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 128;
*IndependentBlockChroma = 128;
} else {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 64;
*IndependentBlockChroma = 64;
}
if (DCCEnabled != true || BytePerPixelC == 0) {
*MaxUncompressedBlockChroma = 0;
*MaxCompressedBlockChroma = 0;
*IndependentBlockChroma = 0;
}
if (DCCEnabled != true) {
*MaxUncompressedBlockLuma = 0;
*MaxCompressedBlockLuma = 0;
*IndependentBlockLuma = 0;
}
}
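/*
 * CalculatePrefetchSourceLines(): number of source lines that must be
 * prefetched before active - the initial fill depth (VInitPreFill)
 * expressed as whole swaths (MaxNumSwath) plus a trailing partial swath.
 */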
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
unsigned int MaxPartialSwath = 0;
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!mode_lib->vba.IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
% SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print(
"WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
% SwathHeight;
}
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
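/*
 * CalculateVMAndRowBytes(): sizes the GPUVM/DCC metadata fetches for one
 * surface - returns the per-frame PDE plus meta-PTE bytes and fills in the
 * per-row PTE and meta-row bytes, PTE request geometry, row heights and
 * the vm/dpte group sizes used for request batching.
 */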
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
unsigned int MPDEBytesFrame = 0;
unsigned int DCCMetaSurfaceBytes = 0;
unsigned int MacroTileSizeBytes = 0;
unsigned int MacroTileHeight = 0;
unsigned int ExtraDPDEBytesFrame = 0;
unsigned int PDEAndMetaPTEBytesFrame = 0;
unsigned int PixelPTEReqHeightPTEs = 0;
unsigned int HostVMDynamicLevels = 0;
double FractionOfPTEReturnDrop;
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048) {
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
} else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
} else {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
}
*MetaRequestHeight = 8 * BlockHeight256Bytes;
*MetaRequestWidth = 8 * BlockWidth256Bytes;
if (ScanDirection != dm_vert) {
*meta_row_height = *MetaRequestHeight;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth)
+ *MetaRequestWidth;
*MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = *MetaRequestWidth;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight)
+ *MetaRequestHeight;
*MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
}
DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel / 256;
if (GPUVMEnable == true) {
*MetaPTEBytesFrame = (dml_ceil((double) (DCCMetaSurfaceBytes - 4.0 * 1024.0) / (8 * 4.0 * 1024), 1) + 1) * 64;
MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1);
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
if (DCCEnable != true) {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
if (SurfaceTiling == dm_sw_linear) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) {
if (ScanDirection != dm_vert) {
*DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
} else {
*DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
}
ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2);
} else {
*DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame
+ ExtraDPDEBytesFrame;
if (HostVMEnable == true) {
PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * HostVMDynamicLevels);
}
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = 1;
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
if (ScanDirection != dm_vert)
FractionOfPTEReturnDrop = 0;
else
FractionOfPTEReturnDrop = 7.0 / 8.0; // drop 7 of every 8 returned PTEs for vertical scan
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
*PixelPTEReqWidth = 16 * BlockWidth256Bytes;
*PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SurfaceTiling == dm_sw_linear) {
if (PTEBufferSizeInRequests == 0)
*dpte_row_height = 1;
else
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
*dpte_row_height = *PixelPTEReqHeight;
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else {
*dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
}
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
<= 64 * PTEBufferSizeInRequests) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
if (GPUVMEnable != true) {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %i\n", *MetaPTEBytesFrame);
if (HostVMEnable == true) {
*PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * HostVMDynamicLevels);
}
if (HostVMEnable == true) {
*vm_group_bytes = 512;
*dpte_group_bytes = 512;
} else if (GPUVMEnable == true) {
*vm_group_bytes = 2048;
if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection == dm_vert) {
*dpte_group_bytes = 512;
} else {
*dpte_group_bytes = 2048;
}
} else {
*vm_group_bytes = 0;
*dpte_group_bytes = 0;
}
return PDEAndMetaPTEBytesFrame;
}
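/*
 * Top-level performance calculation for the chosen mode: derives DISPCLK,
 * DPPCLK and DCFCLKDeepSleep, DSC clocks and delays, per-plane VM/row byte
 * counts and prefetch source lines, then iterates the prefetch schedule
 * over VStartup to compute bandwidths and watermarks.
 */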
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int j, k;
long ReorderBytes = 0;
unsigned int PrefetchMode = v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb];
double MaxTotalRDBandwidth = 0;
double MaxTotalRDBandwidthNoUrgentBurst = 0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
double TWait;
v->WritebackDISPCLK = 0.0;
v->DISPCLKWithRamping = 0;
v->DISPCLKWithoutRamping = 0;
v->GlobalDPPCLK = 0.0;
/* DAL custom code: need to update ReturnBW in case min dcfclk is overridden */
v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] = dml_min3(
v->ReturnBusWidth * v->DCFCLK,
v->DRAMSpeedPerState[v->VoltageLevel] * v->NumberOfChannels * v->DRAMChannelWidth,
v->FabricClockPerState[v->VoltageLevel] * v->FabricDatapathToDCNDataReturn);
if (v->HostVMEnable != true) {
v->ReturnBW = v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
} else {
v->ReturnBW = v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100;
}
/* End DAL custom code */
// DISPCLK and DPPCLK Calculation
//
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k]) {
v->WritebackDISPCLK = dml_max(v->WritebackDISPCLK,
dml30_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->HRatio[k] > 1) {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1));
} else {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPLuma = v->PixelClock[k]
* dml_max(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
dml_max(v->HRatio[k] * v->VRatio[k] / v->PSCL_THROUGHPUT_LUMA[k], 1.0));
if ((v->htaps[k] > 6 || v->vtaps[k] > 6)
&& v->DPPCLKUsingSingleDPPLuma < 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPLuma = 2 * v->PixelClock[k];
}
if ((v->SourcePixelFormat[k] != dm_420_8
&& v->SourcePixelFormat[k] != dm_420_10
&& v->SourcePixelFormat[k] != dm_420_12
&& v->SourcePixelFormat[k] != dm_rgbe_alpha)) {
v->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
v->DPPCLKUsingSingleDPP[k] = v->DPPCLKUsingSingleDPPLuma;
} else {
if (v->HRatioChroma[k] > 1) {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPChroma = v->PixelClock[k]
* dml_max3(v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_THROUGHPUT_CHROMA[k], 1.0);
if ((v->HTAPsChroma[k] > 6 || v->VTAPsChroma[k] > 6)
&& v->DPPCLKUsingSingleDPPChroma
< 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPChroma = 2
* v->PixelClock[k];
}
v->DPPCLKUsingSingleDPP[k] = dml_max(
v->DPPCLKUsingSingleDPPLuma,
v->DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] != k)
continue;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1) {
v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else {
v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
}
}
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->WritebackDISPCLK);
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->WritebackDISPCLK);
ASSERT(v->DISPCLKDPPCLKVCOSpeed != 0);
v->DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
v->DISPCLKWithRamping,
v->DISPCLKDPPCLKVCOSpeed);
v->DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
v->DISPCLKWithoutRamping,
v->DISPCLKDPPCLKVCOSpeed);
v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
v->soc.clock_limits[mode_lib->soc.num_states - 1].dispclk_mhz,
v->DISPCLKDPPCLKVCOSpeed);
if (v->DISPCLKWithoutRampingRoundedToDFSGranularity
> v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated =
v->DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (v->DISPCLKWithRampingRoundedToDFSGranularity
> v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated = v->MaxDispclkRoundedToDFSGranularity;
} else {
v->DISPCLK_calculated =
v->DISPCLKWithRampingRoundedToDFSGranularity;
}
v->DISPCLK = v->DISPCLK_calculated;
DTRACE(" dispclk_mhz (calculated) = %f", v->DISPCLK_calculated);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->DPPCLKUsingSingleDPP[k]
/ v->DPPPerPlane[k]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
v->GlobalDPPCLK = dml_max(
v->GlobalDPPCLK,
v->DPPCLK_calculated[k]);
}
v->GlobalDPPCLK = RoundToDFSGranularityUp(
v->GlobalDPPCLK,
v->DISPCLKDPPCLKVCOSpeed);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->GlobalDPPCLK / 255
* dml_ceil(
v->DPPCLK_calculated[k] * 255.0
/ v->GlobalDPPCLK,
1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, v->DPPCLK_calculated[k]);
v->DPPCLK[k] = v->DPPCLK_calculated[k];
}
// Urgent and B P-State/DRAM Clock Change Watermark
DTRACE(" dcfclk_mhz = %f", v->DCFCLK);
DTRACE(" return_bus_bw = %f", v->ReturnBW);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelDETY[k],
&v->BytePerPixelDETC[k],
&v->BlockHeight256BytesY[k],
&v->BlockHeight256BytesC[k],
&v->BlockWidth256BytesY[k],
&v->BlockWidth256BytesC[k]);
}
CalculateSwathWidth(
false,
v->NumberOfActivePlanes,
v->SourcePixelFormat,
v->SourceScan,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->ODMCombineEnabled,
v->BytePerPixelY,
v->BytePerPixelC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
v->BlendingAndTiming,
v->HActive,
v->HRatio,
v->DPPPerPlane,
v->SwathWidthSingleDPPY,
v->SwathWidthSingleDPPC,
v->SwathWidthY,
v->SwathWidthC,
v->dummyinteger3,
v->dummyinteger4,
v->swath_width_luma_ub,
v->swath_width_chroma_ub);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->ReadBandwidthPlaneLuma[k] = v->SwathWidthSingleDPPY[k] * v->BytePerPixelY[k] / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->ReadBandwidthPlaneChroma[k] = v->SwathWidthSingleDPPC[k] * v->BytePerPixelC[k] / (v->HTotal[k] / v->PixelClock[k]) * v->VRatioChroma[k];
DTRACE("read_bw[%i] = %fBps", k, v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k]);
}
// DCFCLK Deep Sleep
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->ReturnBusWidth,
&v->DCFCLKDeepSleep);
// DSCCLK
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if ((v->BlendingAndTiming[k] != k) || !v->DSCEnabled[k]) {
v->DSCCLK_calculated[k] = 0.0;
} else {
if (v->OutputFormat[k] == dm_420)
v->DSCFormatFactor = 2;
else if (v->OutputFormat[k] == dm_444)
v->DSCFormatFactor = 1;
else if (v->OutputFormat[k] == dm_n422)
v->DSCFormatFactor = 2;
else
v->DSCFormatFactor = 1;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 12
/ v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 6
/ v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 3
/ v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
}
// DSC Delay
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double BPP = v->OutputBppPerState[k][v->VoltageLevel];
if (v->DSCEnabled[k] && BPP != 0) {
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_disabled) {
v->DSCDelay[k] = dscceComputeDelay(v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k])
+ dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DSCDelay[k] = 2 * dscceComputeDelay(v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 2.0,
v->OutputFormat[k],
v->Output[k])
+ dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else {
v->DSCDelay[k] = 4 * dscceComputeDelay(v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 4.0,
v->OutputFormat[k],
v->Output[k])
+ dscComputeDelay(v->OutputFormat[k], v->Output[k]);
}
v->DSCDelay[k] = v->DSCDelay[k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelay[k] = 0;
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && v->BlendingAndTiming[k] == j
&& v->DSCEnabled[j])
v->DSCDelay[k] = v->DSCDelay[j];
// Prefetch
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY = 0;
unsigned int PixelPTEBytesPerRowY = 0;
unsigned int MetaRowByteY = 0;
unsigned int MetaRowByteC = 0;
unsigned int PDEAndMetaPTEBytesFrameC = 0;
unsigned int PixelPTEBytesPerRowC = 0;
bool PTEBufferSizeNotExceededY = 0;
bool PTEBufferSizeNotExceededC = 0;
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
PDEAndMetaPTEBytesFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesC[k],
v->BlockWidth256BytesC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthC[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
v->DCCMetaPitchC[k],
&v->MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&PTEBufferSizeNotExceededC,
&v->dpte_row_width_chroma_ub[k],
&v->dpte_row_height_chroma[k],
&v->meta_req_width_chroma[k],
&v->meta_req_height_chroma[k],
&v->meta_row_width_chroma[k],
&v->meta_row_height_chroma[k],
&v->dummyinteger1,
&v->dummyinteger2,
&v->PixelPTEReqWidthC[k],
&v->PixelPTEReqHeightC[k],
&v->PTERequestSizeC[k],
&v->dpde0_bytes_per_frame_ub_c[k],
&v->meta_pte_bytes_per_frame_ub_c[k]);
v->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightC[k],
v->ViewportYStartC[k],
&v->VInitPreFillC[k],
&v->MaxNumSwathC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
v->MaxNumSwathC[k] = 0;
v->PrefetchSourceLinesC[k] = 0;
}
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesY[k],
v->BlockWidth256BytesY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthY[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&PTEBufferSizeNotExceededY,
&v->dpte_row_width_luma_ub[k],
&v->dpte_row_height[k],
&v->meta_req_width[k],
&v->meta_req_height[k],
&v->meta_row_width[k],
&v->meta_row_height[k],
&v->vm_group_bytes[k],
&v->dpte_group_bytes[k],
&v->PixelPTEReqWidthY[k],
&v->PixelPTEReqHeightY[k],
&v->PTERequestSizeY[k],
&v->dpde0_bytes_per_frame_ub_l[k],
&v->meta_pte_bytes_per_frame_ub_l[k]);
v->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightY[k],
v->ViewportYStartY[k],
&v->VInitPreFillY[k],
&v->MaxNumSwathY[k]);
v->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
v->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ PDEAndMetaPTEBytesFrameC;
v->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
MetaRowByteY,
MetaRowByteC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bw[k],
&v->dpte_row_bw[k]);
}
v->TotalDCCActiveDPP = 0;
v->TotalActiveDPP = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalActiveDPP = v->TotalActiveDPP
+ v->DPPPerPlane[k];
if (v->DCCEnable[k])
v->TotalDCCActiveDPP = v->TotalDCCActiveDPP
+ v->DPPPerPlane[k];
}
ReorderBytes = v->NumberOfChannels * dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->UrgentExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderBytes,
v->DCFCLK,
v->TotalActiveDPP,
v->PixelChunkSizeInKByte,
v->TotalDCCActiveDPP,
v->MetaChunkSize,
v->ReturnBW,
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->DPPPerPlane,
v->dpte_group_bytes,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->TCalc = 24.0 / v->DCFCLKDeepSleep;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackLatency +
CalculateWriteBackDelay(v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->DISPCLK;
} else
v->WritebackDelay[v->VoltageLevel][k] = 0;
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[j] == k
&& v->WritebackEnable[j] == true) {
v->WritebackDelay[v->VoltageLevel][k] = dml_max(v->WritebackDelay[v->VoltageLevel][k],
v->WritebackLatency + CalculateWriteBackDelay(
v->WritebackPixelFormat[j],
v->WritebackHRatio[j],
v->WritebackVRatio[j],
v->WritebackVTaps[j],
v->WritebackDestinationWidth[j],
v->WritebackDestinationHeight[j],
v->WritebackSourceHeight[j],
v->HTotal[k]) / v->DISPCLK);
}
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j)
if (v->BlendingAndTiming[k] == j)
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackDelay[v->VoltageLevel][j];
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->MaxVStartupLines[k] = v->VTotal[k] - v->VActive[k] - dml_max(1.0, dml_ceil((double) v->WritebackDelay[v->VoltageLevel][k] / (v->HTotal[k] / v->PixelClock[k]), 1));
}
v->MaximumMaxVStartupLines = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k)
v->MaximumMaxVStartupLines = dml_max(v->MaximumMaxVStartupLines, v->MaxVStartupLines[k]);
if (v->DRAMClockChangeLatencyOverride > 0.0) {
v->FinalDRAMClockChangeLatency = v->DRAMClockChangeLatencyOverride;
} else {
v->FinalDRAMClockChangeLatency = v->DRAMClockChangeLatency;
}
v->UrgentLatency = CalculateUrgentLatency(v->UrgentLatencyPixelDataOnly, v->UrgentLatencyPixelMixedWithVMData, v->UrgentLatencyVMDataOnly, v->DoUrgentLatencyAdjustment, v->UrgentLatencyAdjustmentFabricClockComponent, v->UrgentLatencyAdjustmentFabricClockReference, v->FabricClock);
v->FractionOfUrgentBandwidth = 0.0;
v->FractionOfUrgentBandwidthImmediateFlip = 0.0;
v->VStartupLines = 13;
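/*
 * Re-evaluate the prefetch schedule and urgent burst factors for every
 * active plane at the current VStartupLines setting.
 */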
do {
MaxTotalRDBandwidth = 0;
MaxTotalRDBandwidthNoUrgentBurst = 0;
DestinationLineTimesForPrefetchLessThan2 = false;
VRatioPrefetchMoreThan4 = false;
TWait = CalculateTWait(
PrefetchMode,
v->FinalDRAMClockChangeLatency,
v->UrgentLatency,
v->SREnterPlusExitTime);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
Pipe myPipe = { 0 };
myPipe.DPPCLK = v->DPPCLK[k];
myPipe.DISPCLK = v->DISPCLK;
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep;
myPipe.DPPPerPlane = v->DPPPerPlane[k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
myPipe.BlockWidth256BytesC = v->BlockWidth256BytesC[k];
myPipe.BlockHeight256BytesC = v->BlockHeight256BytesC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineEnabled = !!v->ODMCombineEnabled[k];
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
v->DSCDelay[k],
v->DPPCLKDelaySubtotal
+ v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->PrefetchSourceLinesY[k],
v->SwathWidthY[k],
v->BytePerPixelY[k],
v->VInitPreFillY[k],
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
&v->DestinationLinesToRequestRowInVBlank[k],
&v->VRatioPrefetchY[k],
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
v->VReadyOffsetPix[k] = dml_max(150.0 / v->DPPCLK[k], TotalRepeaterDelayTime + 20 / v->DCFCLKDeepSleep + 10 / v->DPPCLK[k]) * v->PixelClock[k];
v->VUpdateOffsetPix[k] = dml_ceil(v->HTotal[k] / 4.0, 1);
v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[k]);
} else {
int x = v->BlendingAndTiming[k];
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[x];
v->VReadyOffsetPix[k] = dml_max(150.0 / v->DPPCLK[k], TotalRepeaterDelayTime + 20 / v->DCFCLKDeepSleep + 10 / v->DPPCLK[k]) * v->PixelClock[x];
v->VUpdateOffsetPix[k] = dml_ceil(v->HTotal[x] / 4.0, 1);
if (!v->MaxVStartupLines[x])
v->MaxVStartupLines[x] = v->MaxVStartupLines[k];
v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[x]);
}
}
v->NotEnoughUrgentLatencyHiding[0][0] = false;
v->NotEnoughUrgentLatencyHidingPre = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k]
* v->CursorWidth[k][0] * v->CursorBPP[k][0]
/ 8.0
/ (v->HTotal[k] / v->PixelClock[k])
* v->VRatio[k];
v->cursor_bw_pre[k] = v->NumberOfCursors[k]
* v->CursorWidth[k][0] * v->CursorBPP[k][0]
/ 8.0
/ (v->HTotal[k] / v->PixelClock[k])
* v->VRatioPrefetchY[k];
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->DETBufferSizeInKByte[0],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgentBurstFactorCursor[k],
&v->UrgentBurstFactorLuma[k],
&v->UrgentBurstFactorChroma[k],
&v->NoUrgentLatencyHiding[k]);
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->DETBufferSizeInKByte[0],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPrefetchY[k],
v->VRatioPrefetchC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
&v->UrgentBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
MaxTotalRDBandwidth = MaxTotalRDBandwidth +
dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] *
v->UrgentBurstFactorLuma[k] +
v->ReadBandwidthPlaneChroma[k] *
v->UrgentBurstFactorChroma[k] +
v->cursor_bw[k] *
v->UrgentBurstFactorCursor[k] +
v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) + v->cursor_bw_pre[k] *
v->UrgentBurstFactorCursorPre[k]);
MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst +
dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] +
v->ReadBandwidthPlaneChroma[k] +
v->cursor_bw[k] +
v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (v->VRatioPrefetchY[k] > 4 || v->VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
if (v->NoUrgentLatencyHiding[k] == true)
v->NotEnoughUrgentLatencyHiding[0][0] = true;
if (v->NoUrgentLatencyHidingPre[k] == true)
v->NotEnoughUrgentLatencyHidingPre = true;
}
v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / v->ReturnBW;
if (MaxTotalRDBandwidth <= v->ReturnBW && v->NotEnoughUrgentLatencyHiding[0][0] == 0
&& v->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4
&& !DestinationLineTimesForPrefetchLessThan2)
v->PrefetchModeSupported = true;
else {
v->PrefetchModeSupported = false;
dml_print("DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
dml_print("DML: MaxTotalRDBandwidth:%f AvailReturnBandwidth:%f\n", MaxTotalRDBandwidth, v->ReturnBW);
dml_print("DML: VRatioPrefetch %s more than 4\n", (VRatioPrefetchMoreThan4) ? "is" : "is not");
dml_print("DML: DestinationLines for Prefetch %s less than 2\n", (DestinationLineTimesForPrefetchLessThan2) ? "is" : "is not");
}
if (v->PrefetchModeSupported == true && v->ImmediateFlipSupport == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBW;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->BandwidthAvailableForImmediateFlip =
v->BandwidthAvailableForImmediateFlip
- dml_max(
v->ReadBandwidthPlaneLuma[k] * v->UrgentBurstFactorLuma[k]
+ v->ReadBandwidthPlaneChroma[k] * v->UrgentBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgentBurstFactorCursor[k],
v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) +
v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->DPPPerPlane[k] * (v->PDEAndMetaPTEBytesFrame[k] + v->MetaRowByte[k] + v->PixelPTEBytesPerRow[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
v->UrgentExtraLatency,
v->UrgentLatency,
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
v->total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip + dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k] +
v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k] +
v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k] +
v->cursor_bw[k] * v->UrgentBurstFactorCursor[k],
v->DPPPerPlane[k] * (v->final_flip_bw[k] +
v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) +
v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
v->total_dcn_read_bw_with_flip_no_urgent_burst =
v->total_dcn_read_bw_with_flip_no_urgent_burst +
dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k] + v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k],
v->DPPPerPlane[k] * (v->final_flip_bw[k] + v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
}
v->FractionOfUrgentBandwidthImmediateFlip = v->total_dcn_read_bw_with_flip_no_urgent_burst / v->ReturnBW;
v->ImmediateFlipSupported = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBW) {
v->ImmediateFlipSupported = false;
v->total_dcn_read_bw_with_flip = MaxTotalRDBandwidth;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
v->ImmediateFlipSupported = false;
}
}
} else {
v->ImmediateFlipSupported = false;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ErrorResult[k] || v->NotEnoughTimeForDynamicMetadata[k]) {
v->PrefetchModeSupported = false;
dml_print("DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
}
}
v->VStartupLines = v->VStartupLines + 1;
v->PrefetchModeSupported = v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport &&
!v->HostVMEnable && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) ||
v->ImmediateFlipSupported);
} while (!v->PrefetchModeSupported && v->VStartupLines <= v->MaximumMaxVStartupLines);
ASSERT(v->PrefetchModeSupported);
//Watermarks and NB P-State/DRAM Clock Change Support
{
enum clock_change_support DRAMClockChangeSupport = 0; // dummy
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLK,
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&DRAMClockChangeSupport,
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
if (v->BlendingAndTiming[k] == k) {
v->ThisVStartup = v->VStartup[k];
} else {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
v->ThisVStartup = v->VStartup[j];
}
}
}
v->WritebackAllowDRAMClockChangeEndPosition[k] = dml_max(0,
v->ThisVStartup * v->HTotal[k] / v->PixelClock[k] - v->WritebackDRAMClockChangeWatermark);
} else {
v->WritebackAllowDRAMClockChangeEndPosition[k] = 0;
}
}
}
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
v->NumberOfActivePlanes,
v->VRatio,
v->VRatioChroma,
v->VRatioPrefetchY,
v->VRatioPrefetchC,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->BytePerPixelC,
v->SourceScan,
v->NumberOfCursors,
v->CursorWidth,
v->CursorBPP,
v->BlockWidth256BytesY,
v->BlockHeight256BytesY,
v->BlockWidth256BytesC,
v->BlockHeight256BytesC,
v->DisplayPipeLineDeliveryTimeLuma,
v->DisplayPipeLineDeliveryTimeChroma,
v->DisplayPipeLineDeliveryTimeLumaPrefetch,
v->DisplayPipeLineDeliveryTimeChromaPrefetch,
v->DisplayPipeRequestDeliveryTimeLuma,
v->DisplayPipeRequestDeliveryTimeChroma,
v->DisplayPipeRequestDeliveryTimeLumaPrefetch,
v->DisplayPipeRequestDeliveryTimeChromaPrefetch,
v->CursorRequestDeliveryTime,
v->CursorRequestDeliveryTimePrefetch);
CalculateMetaAndPTETimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->MetaChunkSize,
v->MinMetaChunkSizeBytes,
v->HTotal,
v->VRatio,
v->VRatioChroma,
v->DestinationLinesToRequestRowInVBlank,
v->DestinationLinesToRequestRowInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->BytePerPixelY,
v->BytePerPixelC,
v->SourceScan,
v->dpte_row_height,
v->dpte_row_height_chroma,
v->meta_row_width,
v->meta_row_width_chroma,
v->meta_row_height,
v->meta_row_height_chroma,
v->meta_req_width,
v->meta_req_width_chroma,
v->meta_req_height,
v->meta_req_height_chroma,
v->dpte_group_bytes,
v->PTERequestSizeY,
v->PTERequestSizeC,
v->PixelPTEReqWidthY,
v->PixelPTEReqHeightY,
v->PixelPTEReqWidthC,
v->PixelPTEReqHeightC,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->DST_Y_PER_PTE_ROW_NOM_L,
v->DST_Y_PER_PTE_ROW_NOM_C,
v->DST_Y_PER_META_ROW_NOM_L,
v->DST_Y_PER_META_ROW_NOM_C,
v->TimePerMetaChunkNominal,
v->TimePerChromaMetaChunkNominal,
v->TimePerMetaChunkVBlank,
v->TimePerChromaMetaChunkVBlank,
v->TimePerMetaChunkFlip,
v->TimePerChromaMetaChunkFlip,
v->time_per_pte_group_nom_luma,
v->time_per_pte_group_vblank_luma,
v->time_per_pte_group_flip_luma,
v->time_per_pte_group_nom_chroma,
v->time_per_pte_group_vblank_chroma,
v->time_per_pte_group_flip_chroma);
CalculateVMGroupAndRequestTimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->GPUVMMaxPageTableLevels,
v->HTotal,
v->BytePerPixelC,
v->DestinationLinesToRequestVMInVBlank,
v->DestinationLinesToRequestVMInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->vm_group_bytes,
v->dpde0_bytes_per_frame_ub_l,
v->dpde0_bytes_per_frame_ub_c,
v->meta_pte_bytes_per_frame_ub_l,
v->meta_pte_bytes_per_frame_ub_c,
v->TimePerVMGroupVBlank,
v->TimePerVMGroupFlip,
v->TimePerVMRequestVBlank,
v->TimePerVMRequestFlip);
// Min TTUVBlank
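/*
 * PrefetchMode 0 allows both DRAM clock change and self refresh during
 * VBLANK, mode 1 allows only self refresh, and mode 2 allows neither, so
 * MinTTUVBlank only has to cover the corresponding watermarks.
 */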
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (PrefetchMode == 0) {
v->AllowDRAMClockChangeDuringVBlank[k] = true;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(
v->DRAMClockChangeWatermark,
dml_max(
v->StutterEnterPlusExitWatermark,
v->UrgentWatermark));
} else if (PrefetchMode == 1) {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(
v->StutterEnterPlusExitWatermark,
v->UrgentWatermark);
} else {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = false;
v->MinTTUVBlank[k] = v->UrgentWatermark;
}
if (!v->DynamicMetadataEnable[k])
v->MinTTUVBlank[k] = v->TCalc
+ v->MinTTUVBlank[k];
}
// DCC Configuration
v->ActiveDPPs = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateDCCConfiguration(v->DCCEnable[k],
false, // DCCProgrammingAssumesScanDirectionUnknown: we should always know the direction
v->SourcePixelFormat[k],
v->SurfaceWidthY[k],
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
v->DETBufferSizeInKByte[0] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->BytePerPixelC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->SourceScan[k],
&v->DCCYMaxUncompressedBlock[k],
&v->DCCCMaxUncompressedBlock[k],
&v->DCCYMaxCompressedBlock[k],
&v->DCCCMaxCompressedBlock[k],
&v->DCCYIndependentBlock[k],
&v->DCCCIndependentBlock[k]);
}
{
//Maximum Bandwidth Used
v->TotalDataReadBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalDataReadBandwidth = v->TotalDataReadBandwidth
+ v->ReadBandwidthPlaneLuma[k]
+ v->ReadBandwidthPlaneChroma[k];
}
}
// VStartup Margin
v->VStartupMargin = 0;
v->FirstMainPlane = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
double margin = (v->MaxVStartupLines[k] - v->VStartup[k]) * v->HTotal[k]
/ v->PixelClock[k];
if (v->FirstMainPlane == true) {
v->VStartupMargin = margin;
v->FirstMainPlane = false;
} else {
v->VStartupMargin = dml_min(v->VStartupMargin, margin);
}
}
}
// Stutter Efficiency
CalculateStutterEfficiency(
v->NumberOfActivePlanes,
v->ROBBufferSizeInKByte,
v->TotalDataReadBandwidth,
v->DCFCLK,
v->ReturnBW,
v->SRExitTime,
v->SynchronizedVBlank,
v->DPPPerPlane,
v->DETBufferSizeY,
v->BytePerPixelY,
v->BytePerPixelDETY,
v->SwathWidthY,
v->SwathHeightY,
v->SwathHeightC,
v->DCCRateLuma,
v->DCCRateChroma,
v->HTotal,
v->VTotal,
v->PixelClock,
v->VRatio,
v->SourceScan,
v->BlockHeight256BytesY,
v->BlockWidth256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesC,
v->DCCYMaxUncompressedBlock,
v->DCCCMaxUncompressedBlock,
v->VActive,
v->DCCEnable,
v->WritebackEnable,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->meta_row_bw,
v->dpte_row_bw,
&v->StutterEfficiencyNotIncludingVBlank,
&v->StutterEfficiency,
&v->StutterPeriod);
}
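/*
 * Derive the per-plane swath and DET configuration for the programmed mode:
 * compute byte-per-pixel and 256B request block sizes for every plane, then
 * feed them to CalculateSwathAndDETConfiguration. Outputs that are not
 * needed here are routed into dummy arrays.
 */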
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
// Display Pipe Configuration
double BytePerPixDETY[DC__NUM_DPP__MAX] = { 0 };
double BytePerPixDETC[DC__NUM_DPP__MAX] = { 0 };
int BytePerPixY[DC__NUM_DPP__MAX] = { 0 };
int BytePerPixC[DC__NUM_DPP__MAX] = { 0 };
int Read256BytesBlockHeightY[DC__NUM_DPP__MAX] = { 0 };
int Read256BytesBlockHeightC[DC__NUM_DPP__MAX] = { 0 };
int Read256BytesBlockWidthY[DC__NUM_DPP__MAX] = { 0 };
int Read256BytesBlockWidthC[DC__NUM_DPP__MAX] = { 0 };
double dummy1[DC__NUM_DPP__MAX] = { 0 };
double dummy2[DC__NUM_DPP__MAX] = { 0 };
double dummy3[DC__NUM_DPP__MAX] = { 0 };
double dummy4[DC__NUM_DPP__MAX] = { 0 };
int dummy5[DC__NUM_DPP__MAX] = { 0 };
int dummy6[DC__NUM_DPP__MAX] = { 0 };
bool dummy7[DC__NUM_DPP__MAX] = { 0 };
bool dummysinglestring = false;
unsigned int k;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
&BytePerPixY[k],
&BytePerPixC[k],
&BytePerPixDETY[k],
&BytePerPixDETC[k],
&Read256BytesBlockHeightY[k],
&Read256BytesBlockHeightC[k],
&Read256BytesBlockWidthY[k],
&Read256BytesBlockWidthC[k]);
}
CalculateSwathAndDETConfiguration(
false,
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.DETBufferSizeInKByte[0],
dummy1,
dummy2,
mode_lib->vba.SourceScan,
mode_lib->vba.SourcePixelFormat,
mode_lib->vba.SurfaceTiling,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
mode_lib->vba.ODMCombineEnabled,
mode_lib->vba.BlendingAndTiming,
BytePerPixY,
BytePerPixC,
BytePerPixDETY,
BytePerPixDETC,
mode_lib->vba.HActive,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.DPPPerPlane,
dummy5,
dummy6,
dummy3,
dummy4,
mode_lib->vba.SwathHeightY,
mode_lib->vba.SwathHeightC,
mode_lib->vba.DETBufferSizeY,
mode_lib->vba.DETBufferSizeC,
dummy7,
&dummysinglestring);
}
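/*
 * Translate a source pixel format and swizzle mode into bytes per pixel
 * (storage and DET) for the luma and chroma planes plus the width/height in
 * pixels of a 256-byte request block. Chroma outputs are zero for
 * single-plane formats; linear surfaces always use a block height of 1.
 */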
void dml30_CalculateBytePerPixelAnd256BBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
unsigned int *BytePerPixelY,
unsigned int *BytePerPixelC,
double *BytePerPixelDETY,
double *BytePerPixelDETC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC)
{
if (SourcePixelFormat == dm_444_64) {
*BytePerPixelDETY = 8;
*BytePerPixelDETC = 0;
*BytePerPixelY = 8;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 0;
*BytePerPixelY = 1;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 1;
*BytePerPixelY = 4;
*BytePerPixelC = 1;
} else if (SourcePixelFormat == dm_420_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 2;
*BytePerPixelY = 1;
*BytePerPixelC = 2;
} else if (SourcePixelFormat == dm_420_12) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 4;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
*BytePerPixelDETY = 4.0 / 3;
*BytePerPixelDETC = 8.0 / 3;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
}
if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
|| SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8
|| SourcePixelFormat == dm_mono_16 || SourcePixelFormat == dm_mono_8
|| SourcePixelFormat == dm_rgbe)) {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
} else if (SourcePixelFormat == dm_444_64) {
*BlockHeight256BytesY = 4;
} else if (SourcePixelFormat == dm_444_8) {
*BlockHeight256BytesY = 16;
} else {
*BlockHeight256BytesY = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockHeight256BytesC = 0;
*BlockWidth256BytesC = 0;
} else {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
*BlockHeight256BytesC = 1;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 16;
} else if (SourcePixelFormat == dm_420_8) {
*BlockHeight256BytesY = 16;
*BlockHeight256BytesC = 8;
} else {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
}
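/*
 * TWait is the extra wait time the prefetch schedule has to absorb: with
 * PrefetchMode 0 it covers DRAM clock change, self refresh and urgent
 * latency, with mode 1 only self refresh and urgent latency, and with
 * mode 2 just the urgent latency.
 */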
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatency,
double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(DRAMClockChangeLatency + UrgentLatency,
dml_max(SREnterPlusExitTime, UrgentLatency));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatency);
} else {
return UrgentLatency;
}
}
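/*
 * Minimum DISPCLK the writeback path needs for one stream: the largest of
 * the horizontal-scaler rate (DISPCLK_H), the vertical-scaler rate
 * (DISPCLK_V) and the line-buffer-limited rate (DISPCLK_HB) implied by the
 * writeback scaling parameters.
 */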
double dml30_CalculateWriteBackDISPCLK(
enum source_format_class WritebackPixelFormat,
double PixelClock,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackHTaps,
unsigned int WritebackVTaps,
long WritebackSourceWidth,
long WritebackDestinationWidth,
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
double DISPCLK_H = 0, DISPCLK_V = 0, DISPCLK_HB = 0;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
DISPCLK_V = PixelClock * (WritebackVTaps * dml_ceil(WritebackDestinationWidth / 6.0, 1) + 8.0) / HTotal;
DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / WritebackSourceWidth;
return dml_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
}
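/*
 * Writeback delay expressed in clock cycles (callers divide the result by
 * DISPCLK): the time until the last non-clamped output line has been
 * written, derived from the vertical init phase, the output line length and
 * a fixed overhead term.
 */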
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
long WritebackDestinationWidth,
long WritebackDestinationHeight,
long WritebackSourceHeight,
unsigned int HTotal)
{
double CalculateWriteBackDelay = 0;
double Line_length = 0;
double Output_lines_last_notclamped = 0;
double WritebackVInit = 0;
WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
Line_length = dml_max((double) WritebackDestinationWidth, dml_ceil(WritebackDestinationWidth / 6.0, 1) * WritebackVTaps);
Output_lines_last_notclamped = WritebackDestinationHeight - 1 - dml_ceil((WritebackSourceHeight - WritebackVInit) / WritebackVRatio, 1);
if (Output_lines_last_notclamped < 0) {
CalculateWriteBackDelay = 0;
} else {
CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
}
return CalculateWriteBackDelay;
}
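/*
 * Dynamic metadata timing parameters: Tsetup is the VUPDATE/VREADY window,
 * Tdmbf the time to transmit the metadata bytes at DISPCLK, Tdmec one line
 * time, and Tdmsks the time budget before active start (halved for
 * interlaced timing when the OPP does not do the progressive-to-interlace
 * conversion).
 */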
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
{
double TotalRepeaterDelayTime = 0;
double VUpdateWidthPix = 0;
double VReadyOffsetPix = 0;
double VUpdateOffsetPix = 0;
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / DPPCLK + 3 / DISPCLK);
VUpdateWidthPix = (14 / DCFClkDeepSleep + 12 / DPPCLK + TotalRepeaterDelayTime) * PixelClock;
VReadyOffsetPix = dml_max(150.0 / DPPCLK, TotalRepeaterDelayTime + 20 / DCFClkDeepSleep + 10 / DPPCLK) * PixelClock;
VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
*Tsetup = (VUpdateOffsetPix + VUpdateWidthPix + VReadyOffsetPix) / PixelClock;
*Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
*Tdmec = HTotal / PixelClock;
if (DynamicMetadataLinesBeforeActiveRequired == 0) {
*Tdmsks = VBlank * HTotal / PixelClock / 2.0;
} else {
*Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
}
if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
*Tdmsks = *Tdmsks / 2;
}
}
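/*
 * Average bandwidth needed to keep DCC meta rows (*meta_row_bw) and GPUVM
 * PTE rows (*dpte_row_bw) fetched: bytes per row divided by how long one
 * row stays on screen (row height in lines times the line time), scaled by
 * VRatio. A chroma term is added for 4:2:0 and rgbe_alpha formats.
 */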
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ VRatioChroma * MetaRowByteChroma
/ (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatioChroma * PixelPTEBytesPerRowChroma
/ (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
}
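/*
 * Immediate-flip schedule for one pipe: the flip bandwidth budget is split
 * in proportion to this pipe's PDE/meta/PTE bytes, the VM and row fetches
 * are converted into destination line counts, and the resulting flip
 * bandwidth is reported. The flip is rejected when the fetches need 32/16
 * or more lines or do not fit within the minimum row time.
 */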
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips = 0;
double TimeForFetchingMetaPTEImmediateFlip = 0;
double TimeForFetchingRowInVBlankImmediateFlip = 0;
double ImmediateFlipBW = 0;
double HostVMInefficiencyFactor = 0;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
if (GPUVMEnable == true || DCCEnable == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
}
if (GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3((MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1), LineTime / 4);
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
if (GPUVMEnable == true) {
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
} else if ((GPUVMEnable == true || DCCEnable == true)) {
*final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
} else {
*final_flip_bw = 0;
}
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
} else {
min_row_time = dml_min4(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio,
dpte_row_height_chroma * LineTime / VRatioChroma, meta_row_height_chroma * LineTime / VRatioChroma);
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatio;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatio;
} else {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
}
}
if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
*ImmediateFlipSupportedForPipe = false;
} else {
*ImmediateFlipSupportedForPipe = true;
}
}
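/*
 * Clamp the requested output bpp to what the link can carry: MaxLinkBPP is
 * derived from the link rate, lane count and pixel clock (reduced by 2.4%
 * when DSC runs over DP, and capped when ODM combine is active), then
 * truncated to the nearest valid non-DSC bpp or to a 1/16-bpp DSC step.
 * Returns BPP_INVALID when nothing fits.
 */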
static double TruncToValidBPP(
double LinkBitRate,
int Lanes,
long HTotal,
long HActive,
double PixelClock,
double DesiredBPP,
bool DSCEnable,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent,
int DSCSlices,
int AudioRate,
int AudioLayout,
enum odm_combine_mode ODMCombine)
{
double MaxLinkBPP = 0;
int MinDSCBPP = 0;
double MaxDSCBPP = 0;
int NonDSCBPP0 = 0;
int NonDSCBPP1 = 0;
int NonDSCBPP2 = 0;
if (Format == dm_420) {
NonDSCBPP0 = 12;
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
NonDSCBPP0 = 16;
NonDSCBPP1 = 20;
NonDSCBPP2 = 24;
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
} else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
}
if (DSCEnable && Output == dm_dp) {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock * (1 - 2.4 / 100);
} else {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock;
}
if (ODMCombine == dm_odm_combine_mode_4to1 && MaxLinkBPP > 16) {
MaxLinkBPP = 16;
} else if (ODMCombine == dm_odm_combine_mode_2to1 && MaxLinkBPP > 32) {
MaxLinkBPP = 32;
}
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
return BPP_INVALID;
} else if (MaxLinkBPP >= MaxDSCBPP) {
return MaxDSCBPP;
} else {
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
}
} else {
if (MaxLinkBPP >= NonDSCBPP2) {
return NonDSCBPP2;
} else if (MaxLinkBPP >= NonDSCBPP1) {
return NonDSCBPP1;
} else if (MaxLinkBPP >= NonDSCBPP0) {
return NonDSCBPP0;
} else {
return BPP_INVALID;
}
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP == NonDSCBPP0 || DesiredBPP == 18)) ||
(DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
return BPP_INVALID;
}
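/*
 * Full mode-support sweep: check scaling ratios and taps, source formats,
 * writeback limits, DISPCLK/DPPCLK, pipe count, DIO/DSC capabilities and
 * bandwidth for every voltage state and both MPC-combine options, so that a
 * supported state and configuration can be selected afterwards.
 */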
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode, MaxPrefetchMode;
int i;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
bool WritebackModeSupport = true;
bool ViewportExceedsSurface = false;
double MaxTotalVActiveRDBandwidth = 0;
long ReorderingBytes = 0;
bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX] = { 0 };
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
/*Scale Ratio, taps Support Check*/
v->ScaleRatioAndTapsSupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->ScalerEnabled[k] == false
&& ((v->SourcePixelFormat[k] != dm_444_64
&& v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16
&& v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8
&& v->SourcePixelFormat[k] != dm_rgbe
&& v->SourcePixelFormat[k] != dm_rgbe_alpha)
|| v->HRatio[k] != 1.0
|| v->htaps[k] != 1.0
|| v->VRatio[k] != 1.0
|| v->vtaps[k] != 1.0)) {
v->ScaleRatioAndTapsSupport = false;
} else if (v->vtaps[k] < 1.0 || v->vtaps[k] > 8.0
|| v->htaps[k] < 1.0 || v->htaps[k] > 8.0
|| (v->htaps[k] > 1.0
&& (v->htaps[k] % 2) == 1)
|| v->HRatio[k] > v->MaxHSCLRatio
|| v->VRatio[k] > v->MaxVSCLRatio
|| v->HRatio[k] > v->htaps[k]
|| v->VRatio[k] > v->vtaps[k]
|| (v->SourcePixelFormat[k] != dm_444_64
&& v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16
&& v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8
&& v->SourcePixelFormat[k] != dm_rgbe
&& (v->VTAPsChroma[k] < 1
|| v->VTAPsChroma[k] > 8
|| v->HTAPsChroma[k] < 1
|| v->HTAPsChroma[k] > 8
|| (v->HTAPsChroma[k] > 1 && v->HTAPsChroma[k] % 2 == 1)
|| v->HRatioChroma[k] > v->MaxHSCLRatio
|| v->VRatioChroma[k] > v->MaxVSCLRatio
|| v->HRatioChroma[k] > v->HTAPsChroma[k]
|| v->VRatioChroma[k] > v->VTAPsChroma[k]))) {
v->ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if ((v->SurfaceTiling[k] == dm_sw_linear && (v->SourceScan[k] == dm_vert || v->DCCEnable[k] == true))
|| ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t || v->SurfaceTiling[k] == dm_sw_64kb_d_x)
&& !(v->SourcePixelFormat[k] == dm_444_64))) {
v->SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
dml30_CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelInDETY[k],
&v->BytePerPixelInDETC[k],
&v->Read256BlockHeightY[k],
&v->Read256BlockHeightC[k],
&v->Read256BlockWidthY[k],
&v->Read256BlockWidthC[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->SourceScan[k] != dm_vert) {
v->SwathWidthYSingleDPP[k] = v->ViewportWidth[k];
v->SwathWidthCSingleDPP[k] = v->ViewportWidthChroma[k];
} else {
v->SwathWidthYSingleDPP[k] = v->ViewportHeight[k];
v->SwathWidthCSingleDPP[k] = v->ViewportHeightChroma[k];
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->ReadBandwidthLuma[k] = v->SwathWidthYSingleDPP[k] * dml_ceil(v->BytePerPixelInDETY[k], 1.0) / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->ReadBandwidthChroma[k] = v->SwathWidthYSingleDPP[k] / 2 * dml_ceil(v->BytePerPixelInDETC[k], 2.0) / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k] / 2.0;
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->WritebackEnable[k] == true
&& v->WritebackPixelFormat[k] == dm_444_64) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k]
* v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k]
* v->HTotal[k]
/ v->PixelClock[k]) * 8.0;
} else if (v->WritebackEnable[k] == true) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k]
* v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k]
* v->HTotal[k]
/ v->PixelClock[k]) * 4.0;
} else {
v->WriteBandwidth[k] = 0.0;
}
}
/*Writeback Latency support check*/
v->WritebackLatencySupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->WritebackEnable[k] == true) {
if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave ||
v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
if (v->WriteBandwidth[k]
> 2.0 * v->WritebackInterfaceBufferSize * 1024
/ v->WritebackLatency) {
v->WritebackLatencySupport = false;
}
} else {
if (v->WriteBandwidth[k]
> v->WritebackInterfaceBufferSize * 1024
/ v->WritebackLatency) {
v->WritebackLatencySupport = false;
}
}
}
}
/*Writeback Mode Support Check*/
v->TotalNumberOfActiveWriteback = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->WritebackEnable[k] == true) {
v->TotalNumberOfActiveWriteback =
v->TotalNumberOfActiveWriteback + 1;
}
}
if (v->TotalNumberOfActiveWriteback > v->MaxNumWriteback) {
EnoughWritebackUnits = false;
}
if (!v->WritebackSupportInterleaveAndUsingWholeBufferForASingleStream
&& (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave
|| v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave)) {
WritebackModeSupport = false;
}
if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave && v->TotalNumberOfActiveWriteback > 1) {
WritebackModeSupport = false;
}
/*Writeback Scale Ratio and Taps Support Check*/
v->WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->WritebackEnable[k] == true) {
if (v->WritebackHRatio[k] > v->WritebackMaxHSCLRatio
|| v->WritebackVRatio[k]
> v->WritebackMaxVSCLRatio
|| v->WritebackHRatio[k]
< v->WritebackMinHSCLRatio
|| v->WritebackVRatio[k]
< v->WritebackMinVSCLRatio
|| v->WritebackHTaps[k]
> v->WritebackMaxHSCLTaps
|| v->WritebackVTaps[k]
> v->WritebackMaxVSCLTaps
|| v->WritebackHRatio[k]
> v->WritebackHTaps[k]
|| v->WritebackVRatio[k]
> v->WritebackVTaps[k]
|| (v->WritebackHTaps[k] > 2.0
&& ((v->WritebackHTaps[k] % 2)
== 1))) {
v->WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * v->WritebackDestinationWidth[k] * (v->WritebackVTaps[k] - 1) * 57 > v->WritebackLineBufferSize) {
v->WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
v->WritebackRequiredDISPCLK = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->WritebackEnable[k] == true) {
v->WritebackRequiredDISPCLK = dml_max(v->WritebackRequiredDISPCLK,
dml30_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->HRatio[k] > 1.0) {
v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
if (v->BytePerPixelC[k] == 0.0) {
v->PSCL_FACTOR_CHROMA[k] = 0.0;
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
* dml_max3(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]), v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k], 1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0) && v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
} else {
if (v->HRatioChroma[k] > 1.0) {
v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k] * dml_max5(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_FACTOR_CHROMA[k],
1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0 || v->HTAPsChroma[k] > 6.0 || v->VTAPsChroma[k] > 6.0)
&& v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
int MaximumSwathWidthSupportLuma = 0;
int MaximumSwathWidthSupportChroma = 0;
if (v->SurfaceTiling[k] == dm_sw_linear) {
MaximumSwathWidthSupportLuma = 8192.0;
} else if (v->SourceScan[k] == dm_vert && v->BytePerPixelC[k] > 0) {
MaximumSwathWidthSupportLuma = 2880.0;
} else {
MaximumSwathWidthSupportLuma = 5760.0;
}
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma / 2.0;
} else {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma;
}
v->MaximumSwathWidthInLineBufferLuma = v->LineBufferSize * dml_max(v->HRatio[k], 1.0) / v->LBBitPerPixel[k]
/ (v->vtaps[k] + dml_max(dml_ceil(v->VRatio[k], 1.0) - 2, 0.0));
if (v->BytePerPixelC[k] == 0.0) {
v->MaximumSwathWidthInLineBufferChroma = 0;
} else {
v->MaximumSwathWidthInLineBufferChroma = v->LineBufferSize * dml_max(v->HRatioChroma[k], 1.0) / v->LBBitPerPixel[k]
/ (v->VTAPsChroma[k] + dml_max(dml_ceil(v->VRatioChroma[k], 1.0) - 2, 0.0));
}
v->MaximumSwathWidthLuma[k] = dml_min(MaximumSwathWidthSupportLuma, v->MaximumSwathWidthInLineBufferLuma);
v->MaximumSwathWidthChroma[k] = dml_min(MaximumSwathWidthSupportChroma, v->MaximumSwathWidthInLineBufferChroma);
}
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->odm_combine_dummy,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->DPPPerPlane,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
v->SwathWidthY,
v->SwathWidthC,
v->SwathHeightY,
v->SwathHeightC,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
if (v->ODMCombinePolicy == dm_odm_combine_policy_none) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_2to1) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_4to1
|| v->PlaneRequiredDISPCLKWithODMCombine2To1 > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
}
if (v->DSCEnabled[k] && v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN30_MAX_DSC_IMAGE_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN30_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN30_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 4;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 4;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2;
} else if ((v->WhenToDoMPCCombine == dm_mpc_never
|| (v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= v->MaxDppclkRoundedDownToDFSGranularity
&& v->SingleDPPViewportSizeSupportPerPlane[k] == true))) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity) || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
if (v->NoOfDPP[i][j][k] == 1)
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] + 1;
}
if (j == 1 && v->WhenToDoMPCCombine != dm_mpc_never) {
while (!(v->TotalNumberOfActiveDPP[i][j] >= v->MaxNumDPP || v->TotalNumberOfSingleDPPPlanes[i][j] == 0)) {
double BWOfNonSplitPlaneOfMaximumBandwidth = 0;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k] > BWOfNonSplitPlaneOfMaximumBandwidth
&& v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled && v->MPCCombine[i][j][k] == false) {
BWOfNonSplitPlaneOfMaximumBandwidth = v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
v->MPCCombine[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = true;
v->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
v->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = v->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + 1;
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] - 1;
}
}
if (v->TotalNumberOfActiveDPP[i][j] > v->MaxNumDPP) {
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (v->SingleDPPViewportSizeSupportPerPlane[k] == false && v->WhenToDoMPCCombine != dm_mpc_never) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
} else {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
if (!(v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1] && v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
} else {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity) || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->WritebackRequiredDISPCLK);
if (v->MaxDispclkRoundedDownToDFSGranularity < v->WritebackRequiredDISPCLK) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
} else {
v->TotalAvailablePipesSupport[i][j] = false;
}
}
}
/*Display IO and DSC Support Check*/
v->NonsupportedDSCInputBPC = false;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!(v->DSCInputBitPerComponent[k] == 12.0
|| v->DSCInputBitPerComponent[k] == 10.0
|| v->DSCInputBitPerComponent[k] == 8.0)) {
v->NonsupportedDSCInputBPC = true;
}
}
/*Number Of DSC Slices*/
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->PixelClockBackEnd[k] > 3200) {
v->NumberOfDSCSlices[k] = dml_ceil(v->PixelClockBackEnd[k] / 400.0, 4.0);
} else if (v->PixelClockBackEnd[k] > 1360) {
v->NumberOfDSCSlices[k] = 8;
} else if (v->PixelClockBackEnd[k] > 680) {
v->NumberOfDSCSlices[k] = 4;
} else if (v->PixelClockBackEnd[k] > 340) {
v->NumberOfDSCSlices[k] = 2;
} else {
v->NumberOfDSCSlices[k] = 1;
}
} else {
v->NumberOfDSCSlices[k] = 0;
}
}
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
if (v->BlendingAndTiming[k] == k) {
if (v->Output[k] == dm_hdmi) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
v->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, v->PHYCLKPerState[i]) * 10,
3,
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
false,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
if (v->Output[k] == dm_dp) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
}
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
v->RequiresFEC[i][k] = false;
}
v->Outbpp = BPP_INVALID;
if (v->PHYCLKPerState[i] >= 270.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 2700,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 5400,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 8100,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->ForcedOutputLinkBPP[k] == 0) {
//if (v->Outbpp == BPP_INVALID && v->DSCEnabled[k] == dm_dsc_enable_only_if_necessary && v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
if (v->Output[k] == dm_dp) {
v->RequiresFEC[i][k] = true;
}
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 8100,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: report OutputTypeAndRatePerState once the VBA string concatenation below is ported
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
}
}
} else {
v->OutputBppPerState[i][k] = 0;
}
}
}
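/* DIO support check: a stream fails if no valid output bpp was found for the
 * state, or if it is 4:2:0 interlaced with the progressive-to-interlace unit
 * in the OPP.
 */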
for (i = 0; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
&& (v->OutputBppPerState[i][k] == 0
|| (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
v->DIOSupport[i] = false;
}
}
}
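/* ODM 4:1 combine is only acceptable when the ASIC supports it and the stream
 * is not driven through DIO (DP, eDP or HDMI).
 */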
for (i = 0; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
&& (v->ODMCombine4To1Supported == false || v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)) {
v->ODMCombine4To1SupportCheckOK[i] = false;
}
}
}
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
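/* Count the DSC engines each state needs: 4 per stream with 4:1 ODM combine,
 * 2 with 2:1 combine, otherwise 1 per stream that requires DSC.
 */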
for (i = 0; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->RequiresDSC[i][k] == true) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 4.0;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 2.0;
} else {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 1.0;
}
}
}
if (v->TotalDSCUnitsRequired > v->NumberOfDSC) {
v->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
} else {
v->BPP = v->OutputBppPerState[i][k];
}
if (v->RequiresDSC[i][k] == true && v->BPP != 0.0) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
v->DSCDelayPerState[i][k] = dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->DSCDelayPerState[i][k] = 2.0
* dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 2,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else {
v->DSCDelayPerState[i][k] = 4.0
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 4,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
}
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
if (v->BlendingAndTiming[k] == m && v->RequiresDSC[i][m] == true) {
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][m];
}
}
}
}
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->ODMCombineEnableThisState,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->NoOfDPPThisState,
v->swath_width_luma_ub_this_state,
v->swath_width_chroma_ub_this_state,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->dummystring,
&v->ViewportSizeSupport[i][j]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->swath_width_luma_ub_all_states[i][j][k] = v->swath_width_luma_ub_this_state[k];
v->swath_width_chroma_ub_all_states[i][j][k] = v->swath_width_chroma_ub_this_state[k];
v->SwathWidthYAllStates[i][j][k] = v->SwathWidthYThisState[k];
v->SwathWidthCAllStates[i][j][k] = v->SwathWidthCThisState[k];
v->SwathHeightYAllStates[i][j][k] = v->SwathHeightYThisState[k];
v->SwathHeightCAllStates[i][j][k] = v->SwathHeightCThisState[k];
v->DETBufferSizeYAllStates[i][j][k] = v->DETBufferSizeYThisState[k];
v->DETBufferSizeCAllStates[i][j][k] = v->DETBufferSizeCThisState[k];
}
}
}
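/* Cursor read bandwidth: cursor bytes per line divided by the line time,
 * scaled by the vertical ratio.
 */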
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
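/* For each state and MPC-combine option: size the DPTE and DCC meta rows,
 * compute prefetch source lines, urgent burst factors, vactive pixel and
 * cursor bandwidth, and the projected deep-sleep DCFCLK.
 */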
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
}
v->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->DCCEnable[k] == true) {
v->TotalNumberOfDCCActiveDPP[i][j] = v->TotalNumberOfDCCActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
|| v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
v->PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightC[k],
v->Read256BlockWidthC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthCThisState[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
0.0,
&v->MacroTileWidthC[k],
&v->MetaRowBytesC,
&v->DPTEBytesPerRowC,
&v->PTEBufferSizeNotExceededC[i][j][k],
&v->dummyinteger7,
&v->dpte_row_height_chroma[k],
&v->dummyinteger28,
&v->dummyinteger26,
&v->dummyinteger23,
&v->meta_row_height_chroma[k],
&v->dummyinteger8,
&v->dummyinteger9,
&v->dummyinteger19,
&v->dummyinteger20,
&v->dummyinteger17,
&v->dummyinteger10,
&v->dummyinteger11);
v->PrefetchLinesC[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightCThisState[k],
v->ViewportYStartC[k],
&v->PrefillC[k],
&v->MaxNumSwC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
v->PDEAndMetaPTEBytesPerFrameC = 0.0;
v->MetaRowBytesC = 0.0;
v->DPTEBytesPerRowC = 0.0;
v->PrefetchLinesC[i][j][k] = 0.0;
v->PTEBufferSizeNotExceededC[i][j][k] = true;
}
v->PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightY[k],
v->Read256BlockWidthY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthYThisState[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&v->MetaRowBytesY,
&v->DPTEBytesPerRowY,
&v->PTEBufferSizeNotExceededY[i][j][k],
v->dummyinteger4,
&v->dpte_row_height[k],
&v->dummyinteger29,
&v->dummyinteger27,
&v->dummyinteger24,
&v->meta_row_height[k],
&v->dummyinteger25,
&v->dpte_group_bytes[k],
&v->dummyinteger21,
&v->dummyinteger22,
&v->dummyinteger18,
&v->dummyinteger5,
&v->dummyinteger6);
v->PrefetchLinesY[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightYThisState[k],
v->ViewportYStartY[k],
&v->PrefillY[k],
&v->MaxNumSwY[k]);
v->PDEAndMetaPTEBytesPerFrame[i][j][k] = v->PDEAndMetaPTEBytesPerFrameY + v->PDEAndMetaPTEBytesPerFrameC;
v->MetaRowBytes[i][j][k] = v->MetaRowBytesY + v->MetaRowBytesC;
v->DPTEBytesPerRow[i][j][k] = v->DPTEBytesPerRowY + v->DPTEBytesPerRowC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
v->MetaRowBytesY,
v->MetaRowBytesC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
v->DPTEBytesPerRowY,
v->DPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bandwidth[i][j][k],
&v->dpte_row_bandwidth[i][j][k]);
}
v->UrgLatency[i] = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClockPerState[i]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->DETBufferSizeInKByte[0],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursor[k],
&v->UrgentBurstFactorLuma[k],
&v->UrgentBurstFactorChroma[k],
&NotUrgentLatencyHiding[k]);
}
v->NotUrgentLatencyHiding[i][j] = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (NotUrgentLatencyHiding[k]) {
v->NotUrgentLatencyHiding[i][j] = true;
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->VActivePixelBandwidth[i][j][k] = v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k]
+ v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k];
v->VActiveCursorBandwidth[i][j][k] = v->cursor_bw[k] * v->UrgentBurstFactorCursor[k];
}
v->TotalVActivePixelBandwidth[i][j] = 0;
v->TotalVActiveCursorBandwidth[i][j] = 0;
v->TotalMetaRowBandwidth[i][j] = 0;
v->TotalDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalVActivePixelBandwidth[i][j] = v->TotalVActivePixelBandwidth[i][j] + v->VActivePixelBandwidth[i][j][k];
v->TotalVActiveCursorBandwidth[i][j] = v->TotalVActiveCursorBandwidth[i][j] + v->VActiveCursorBandwidth[i][j][k];
v->TotalMetaRowBandwidth[i][j] = v->TotalMetaRowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->meta_row_bandwidth[i][j][k];
v->TotalDPTERowBandwidth[i][j] = v->TotalDPTERowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->dpte_row_bandwidth[i][j][k];
}
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_FACTOR,
v->PSCL_FACTOR_CHROMA,
v->RequiredDPPCLKThisState,
v->ReadBandwidthLuma,
v->ReadBandwidthChroma,
v->ReturnBusWidth,
&v->ProjectedDCFCLKDeepSleep[i][j]);
}
}
//Calculate writeback delay, maximum vstartup and Return BW
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelayTime[k] = v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->RequiredDISPCLK[i][j];
} else {
v->WritebackDelayTime[k] = 0.0;
}
for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
if (v->BlendingAndTiming[m] == k && v->WritebackEnable[m] == true) {
v->WritebackDelayTime[k] = dml_max(
v->WritebackDelayTime[k],
v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[m],
v->WritebackHRatio[m],
v->WritebackVRatio[m],
v->WritebackVTaps[m],
v->WritebackDestinationWidth[m],
v->WritebackDestinationHeight[m],
v->WritebackSourceHeight[m],
v->HTotal[m]) / v->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
if (v->BlendingAndTiming[k] == m) {
v->WritebackDelayTime[k] = v->WritebackDelayTime[m];
}
}
}
v->MaxMaxVStartup[i][j] = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->MaximumVStartup[i][j][k] = v->VTotal[k] - v->VActive[k]
- dml_max(1.0, dml_ceil(1.0 * v->WritebackDelayTime[k] / (v->HTotal[k] / v->PixelClock[k]), 1.0));
v->MaxMaxVStartup[i][j] = dml_max(v->MaxMaxVStartup[i][j], v->MaximumVStartup[i][j][k]);
}
}
}
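/* Worst-case out-of-order return data across all memory channels; used below
 * for the extra-latency and re-ordering-buffer checks.
 */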
ReorderingBytes = v->NumberOfChannels
* dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
}
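/* Optionally lower DCFCLK per state to the minimum actually required, then
 * clamp it back up to the SoC minimum DCFCLK when requested.
 */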
if (v->UseMinimumRequiredDCFCLK == true) {
UseMinimumDCFCLK(mode_lib, v, MaxPrefetchMode, ReorderingBytes);
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
}
}
}
}
}
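/* The ideal SDP return bandwidth is the minimum of the SDP, DRAM and fabric
 * data paths; derate it by the urgent-latency efficiency that applies with or
 * without host VM traffic on the path.
 */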
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth,
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn);
if (v->HostVMEnable != true) {
v->ReturnBWPerState[i][j] = v->IdealSDPPortBandwidthPerState[i][j] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
/ 100;
} else {
v->ReturnBWPerState[i][j] = v->IdealSDPPortBandwidthPerState[i][j]
* v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100;
}
}
}
//Re-ordering Buffer Support Check
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
v->ROBSupport[i][j] = true;
} else {
v->ROBSupport[i][j] = false;
}
}
}
//Vertical Active BW support check
MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth * v->MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
/ 100);
if (MaxTotalVActiveRDBandwidth <= v->MaxTotalVerticalActiveAvailableBandwidth[i][j]) {
v->TotalVerticalActiveBandwidthSupport[i][j] = true;
} else {
v->TotalVerticalActiveBandwidthSupport[i][j] = false;
}
}
}
//Prefetch Check
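// For each state, walk prefetch modes and vstartup values until a schedule is
// found that fits the return bandwidth, urgent latency hiding, dynamic
// metadata and (when required) immediate flip, or until all options are
// exhausted; then derive the watermarks for the chosen schedule.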
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
int NextPrefetchModeState = MinPrefetchMode;
v->TimeCalc = 24 / v->ProjectedDCFCLKDeepSleep[i][j];
v->BandwidthWithoutPrefetchSupported[i][j] = true;
if (v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j] + v->TotalDPTERowBandwidth[i][j]
> v->ReturnBWPerState[i][j] || v->NotUrgentLatencyHiding[i][j]) {
v->BandwidthWithoutPrefetchSupported[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
v->ODMCombineEnabled[k] = v->ODMCombineEnablePerState[i][k];
}
v->ExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderingBytes,
v->DCFCLKState[i][j],
v->TotalNumberOfActiveDPP[i][j],
v->PixelChunkSizeInKByte,
v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize,
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->NoOfDPPThisState,
v->dpte_group_bytes,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
do {
v->PrefetchModePerState[i][j] = NextPrefetchModeState;
v->MaxVStartup = v->NextMaxVStartup;
v->TWait = CalculateTWait(v->PrefetchModePerState[i][j], v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
Pipe myPipe = { 0 };
myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineEnabled = !!v->ODMCombineEnabled[k];
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
v->DSCDelayPerState[i][k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->PrefetchLinesY[i][j][k],
v->SwathWidthYThisState[k],
v->BytePerPixelY[k],
v->PrefillY[k],
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
&v->LinesForMetaAndDPTERow[k],
&v->VRatioPreY[i][j][k],
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->DETBufferSizeInKByte[0],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPreY[i][j][k],
v->VRatioPreC[i][j][k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
&v->UrgentBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
}
v->MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k])
* v->VRatioPreY[i][j][k];
v->MaximumReadBandwidthWithPrefetch = v->MaximumReadBandwidthWithPrefetch
+ dml_max4(
v->VActivePixelBandwidth[i][j][k],
v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k] * (v->meta_row_bandwidth[i][j][k] + v->dpte_row_bandwidth[i][j][k]),
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k] * v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->NotEnoughUrgentLatencyHidingPre = false;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->NoUrgentLatencyHidingPre[k] == true) {
v->NotEnoughUrgentLatencyHidingPre = true;
}
}
v->PrefetchSupported[i][j] = true;
if (v->BandwidthWithoutPrefetchSupported[i][j] == false || v->MaximumReadBandwidthWithPrefetch > v->ReturnBWPerState[i][j]
|| v->NotEnoughUrgentLatencyHidingPre == 1) {
v->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->LineTimesForPrefetch[k] < 2.0 || v->LinesForMetaPTE[k] >= 32.0 || v->LinesForMetaAndDPTERow[k] >= 16.0
|| v->NoTimeForPrefetch[i][j][k] == true) {
v->PrefetchSupported[i][j] = false;
}
}
v->DynamicMetadataSupported[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->NoTimeForDynamicMetadata[i][j][k] == true) {
v->DynamicMetadataSupported[i][j] = false;
}
}
v->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->VRatioPreY[i][j][k] > 4.0 || v->VRatioPreC[i][j][k] > 4.0 || v->NoTimeForPrefetch[i][j][k] == true) {
v->VRatioInPrefetchSupported[i][j] = false;
}
}
v->AnyLinesForVMOrRowTooLarge = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->LinesForMetaAndDPTERow[k] >= 16 || v->LinesForMetaPTE[k] >= 32) {
v->AnyLinesForVMOrRowTooLarge = true;
}
}
if (v->PrefetchSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBWPerState[i][j];
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
- dml_max(
v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k] * v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k]
+ v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
CalculateFlipSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
v->ExtraLatency,
v->UrgLatency[i],
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ dml_max3(
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k] * v->final_flip_bw[k] + v->VActivePixelBandwidth[i][j][k]
+ v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->final_flip_bw[k]
+ v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->ImmediateFlipSupportedForState[i][j] = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBWPerState[i][j]) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
v->ImmediateFlipSupportedForState[i][j] = false;
}
if (v->MaxVStartup <= 13 || v->AnyLinesForVMOrRowTooLarge == false) {
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
NextPrefetchModeState = NextPrefetchModeState + 1;
} else {
v->NextMaxVStartup = v->NextMaxVStartup - 1;
}
} while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
&& ((v->HostVMEnable == false && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true))
|| (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode)));
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLKPerState[i],
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&v->DRAMClockChangeSupport[i][j],
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
}
}
/*PTE Buffer Size Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->PTEBufferSizeNotExceededY[i][j][k] == false || v->PTEBufferSizeNotExceededC[i][j][k] == false) {
v->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
v->CursorSupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->CursorWidth[k][0] > 0.0) {
if (v->CursorBPP[k][0] == 64 && v->Cursor64BppSupport == false) {
v->CursorSupport = false;
}
}
}
/*Valid Pitch Check*/
v->PitchSupport = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->AlignedYPitch[k] = dml_ceil(dml_max(v->PitchY[k], v->SurfaceWidthY[k]), v->MacroTileWidthY[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchY[k] = dml_ceil(dml_max(v->DCCMetaPitchY[k], v->SurfaceWidthY[k]), 64.0 * v->Read256BlockWidthY[k]);
} else {
v->AlignedDCCMetaPitchY[k] = v->DCCMetaPitchY[k];
}
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_rgbe && v->SourcePixelFormat[k] != dm_mono_8) {
v->AlignedCPitch[k] = dml_ceil(dml_max(v->PitchC[k], v->SurfaceWidthC[k]), v->MacroTileWidthC[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchC[k] = dml_ceil(dml_max(v->DCCMetaPitchC[k], v->SurfaceWidthC[k]), 64.0 * v->Read256BlockWidthC[k]);
} else {
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
} else {
v->AlignedCPitch[k] = v->PitchC[k];
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
if (v->AlignedYPitch[k] > v->PitchY[k] || v->AlignedCPitch[k] > v->PitchC[k] || v->AlignedDCCMetaPitchY[k] > v->DCCMetaPitchY[k]
|| v->AlignedDCCMetaPitchC[k] > v->DCCMetaPitchC[k]) {
v->PitchSupport = false;
}
}
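/*Viewport Exceeds Surface Check*/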
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k])
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
&& v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
}
}
/*Mode Support, Voltage State and SOC Configuration*/
for (i = v->soc.num_states - 1; i >= 0; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
&& v->NotEnoughDSCUnits[i] == 0
&& v->DTBCLKRequiredMoreThanSupported[i] == 0
&& v->ROBSupport[i][j] == 1 && v->DISPCLK_DPPCLK_Support[i][j] == 1 && v->TotalAvailablePipesSupport[i][j] == 1
&& EnoughWritebackUnits == 1 && WritebackModeSupport == 1
&& v->WritebackLatencySupport == 1 && v->WritebackScaleRatioAndTapsSupport == 1 && v->CursorSupport == 1 && v->PitchSupport == 1
&& ViewportExceedsSurface == 0 && v->PrefetchSupported[i][j] == 1 && v->DynamicMetadataSupported[i][j] == 1
&& v->TotalVerticalActiveBandwidthSupport[i][j] == 1 && v->VRatioInPrefetchSupported[i][j] == 1
&& v->PTEBufferSizeNotExceeded[i][j] == 1 && v->NonsupportedDSCInputBPC == 0
&& ((v->HostVMEnable == 0 && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true)) {
v->ModeSupport[i][j] = true;
} else {
v->ModeSupport[i][j] = false;
}
}
}
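/* Pick the lowest voltage state that supports the mode, preferring the
 * MPC-combine configuration when it is supported at that state, then latch
 * the chosen clocks, DPP split and immediate-flip capability.
 */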
{
unsigned int MaximumMPCCombine = 0;
for (i = v->soc.num_states; i >= 0; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
if (v->ModeSupport[i][1] == true) {
MaximumMPCCombine = 1;
} else {
MaximumMPCCombine = 0;
}
}
}
v->ImmediateFlipSupport = v->ImmediateFlipSupportedForState[v->VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->MPCCombineEnable[k] = v->MPCCombine[v->VoltageLevel][MaximumMPCCombine][k];
v->DPPPerPlane[k] = v->NoOfDPP[v->VoltageLevel][MaximumMPCCombine][k];
}
v->DCFCLK = v->DCFCLKState[v->VoltageLevel][MaximumMPCCombine];
v->DRAMSpeed = v->DRAMSpeedPerState[v->VoltageLevel];
v->FabricClock = v->FabricClockPerState[v->VoltageLevel];
v->SOCCLK = v->SOCCLKPerState[v->VoltageLevel];
v->ReturnBW = v->ReturnBWPerState[v->VoltageLevel][MaximumMPCCombine];
v->maxMpcComb = MaximumMPCCombine;
}
}
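/* Compute urgent, DRAM-clock-change, writeback and stutter watermarks for one
 * state, and classify DRAM clock change support as vactive, vblank or
 * unsupported based on the remaining latency-hiding margin.
 */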
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
{
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
double LinesInDETC = 0;
unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX] = { 0 };
unsigned int LinesInDETCRoundedDownToSwath = 0;
double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
double FullDETBufferingTimeC = 0;
double ActiveDRAMClockChangeLatencyMarginY = 0;
double ActiveDRAMClockChangeLatencyMarginC = 0;
double WritebackDRAMClockChangeLatencyMargin = 0;
double PlaneWithMinActiveDRAMClockChangeMargin = 0;
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 0;
double FullDETBufferingTimeYStutterCriticalPlane = 0;
double TimeToFinishSwathTransferStutterCriticalPlane = 0;
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
if (DCCEnable[k] == true) {
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
}
}
*UrgentWatermark = UrgentLatency + ExtraLatency;
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
}
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
} else {
*WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
} else {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
*MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
mode_lib->vba.TotalNumberOfActiveOTG = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
}
}
if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
}
}
*StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}
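/* The deep-sleep DCFCLK must keep up with each plane's pipe delivery rate
 * (with a 10% margin), stay above PixelClock / 16, and cover the total read
 * bandwidth over the return bus, with a fixed floor of 8.
 */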
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep)
{
double DisplayPipeLineDeliveryTimeLuma = 0;
double DisplayPipeLineDeliveryTimeChroma = 0;
unsigned int k;
double ReadBandwidth = 0.0;
//double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (BytePerPixelC[k] > 0) {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(1.1 * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma, 1.1 * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
} else {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
}
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(mode_lib->vba.DCFCLKDeepSleepPerPlane[k], PixelClock[k] / 16);
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
*DCFCLKDeepSleep = dml_max(8.0, ReadBandwidth / ReturnBusWidth);
for (k = 0; k < NumberOfActivePlanes; ++k) {
*DCFCLKDeepSleep = dml_max(*DCFCLKDeepSleep, mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
}
}
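/* Urgent burst factors express how much the request rate must be inflated so
 * that the cursor, luma and chroma buffers still hide the urgent latency; if a
 * buffer cannot hide it at all, NotEnoughUrgentLatencyHiding is flagged.
 */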
static void CalculateUrgentBurstFactor(
long swath_width_luma_ub,
long swath_width_chroma_ub,
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
double LinesInDETLuma = 0;
double LinesInDETChroma = 0;
unsigned int LinesInCursorBuffer = 0;
double CursorBufferSizeInTime = 0;
double DETBufferSizeInTimeLuma = 0;
double DETBufferSizeInTimeChroma = 0;
*NotEnoughUrgentLatencyHiding = 0;
if (CursorWidth > 0) {
LinesInCursorBuffer = 1 << (unsigned int) dml_floor(dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
if (VRatio > 0) {
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorCursor = 0;
} else {
*UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
}
} else {
*UrgentBurstFactorCursor = 1;
}
}
LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / swath_width_luma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorLuma = 0;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
} else {
*UrgentBurstFactorLuma = 1;
}
if (BytePerPixelInDETC > 0) {
LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / swath_width_chroma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / VRatio;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorChroma = 0;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
}
} else {
*UrgentBurstFactorChroma = 1;
}
}
}
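/* Line and request delivery times for the active and prefetch phases: planes
 * with VRatio > 1 are paced by scaler throughput and DPPCLK, others by the
 * pixel clock; per-request times divide the line delivery time by the number
 * of 256B requests per swath, with cursors handled separately.
 */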
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][2],
unsigned int CursorBPP[][2],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[])
{
double req_per_swath_ub = 0;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma[k] = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (VRatioPrefetchY[k] <= 1) {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (VRatioPrefetchC[k] <= 1) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
} else {
req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
}
DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
if (BytePerPixelC[k] == 0) {
DisplayPipeRequestDeliveryTimeChroma[k] = 0;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockWidth256BytesC[k];
} else {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockHeight256BytesC[k];
}
DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
int cursor_req_per_width = 0;
cursor_req_per_width = dml_ceil(CursorWidth[k][0] * CursorBPP[k][0] / 256 / 8, 1);
if (NumberOfCursors[k] > 0) {
if (VRatio[k] <= 1) {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
if (VRatioPrefetchY[k] <= 1) {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
} else {
CursorRequestDeliveryTime[k] = 0;
CursorRequestDeliveryTimePrefetch[k] = 0;
}
}
}
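/* Spread DCC meta-chunk and PTE-group fetches evenly over the time available
 * in the nominal, vblank and immediate-flip cases, for both luma and chroma.
 */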
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[])
{
unsigned int meta_chunk_width = 0;
unsigned int min_meta_chunk_width = 0;
unsigned int meta_chunk_per_row_int = 0;
unsigned int meta_row_remainder = 0;
unsigned int meta_chunk_threshold = 0;
unsigned int meta_chunks_per_row_ub = 0;
unsigned int meta_chunk_width_chroma = 0;
unsigned int min_meta_chunk_width_chroma = 0;
unsigned int meta_chunk_per_row_int_chroma = 0;
unsigned int meta_row_remainder_chroma = 0;
unsigned int meta_chunk_threshold_chroma = 0;
unsigned int meta_chunks_per_row_ub_chroma = 0;
unsigned int dpte_group_width_luma = 0;
unsigned int dpte_groups_per_row_luma_ub = 0;
unsigned int dpte_group_width_chroma = 0;
unsigned int dpte_groups_per_row_chroma_ub = 0;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / VRatioChroma[k];
}
DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_META_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_META_ROW_NOM_C[k] = meta_row_height_chroma[k] / VRatioChroma[k];
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
meta_chunk_width = MetaChunkSize * 1024 * 256 / BytePerPixelY[k] / meta_row_height[k];
min_meta_chunk_width = MinMetaChunkSizeBytes * 256 / BytePerPixelY[k] / meta_row_height[k];
meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
} else {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height[k];
}
if (meta_row_remainder <= meta_chunk_threshold) {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
} else {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
}
TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
if (BytePerPixelC[k] == 0) {
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
} else {
meta_chunk_width_chroma = MetaChunkSize * 1024 * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
min_meta_chunk_width_chroma = MinMetaChunkSizeBytes * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
meta_chunk_per_row_int_chroma = (double) meta_row_width_chroma[k] / meta_chunk_width_chroma;
meta_row_remainder_chroma = meta_row_width_chroma[k] % meta_chunk_width_chroma;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_width_chroma[k];
} else {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_height_chroma[k];
}
if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
} else {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
}
TimePerChromaMetaChunkNominal[k] = meta_row_height_chroma[k] / VRatioChroma[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
}
} else {
TimePerMetaChunkNominal[k] = 0;
TimePerMetaChunkVBlank[k] = 0;
TimePerMetaChunkFlip[k] = 0;
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
if (SourceScan[k] != dm_vert) {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqWidthY[k];
} else {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(1.0 * dpte_row_width_luma_ub[k] / dpte_group_width_luma, 1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_flip_luma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
if (BytePerPixelC[k] == 0) {
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqWidthC[k];
} else {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(1.0 * dpte_row_width_chroma_ub[k] / dpte_group_width_chroma, 1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_vblank_chroma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_flip_chroma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
}
} else {
time_per_pte_group_nom_luma[k] = 0;
time_per_pte_group_vblank_luma[k] = 0;
time_per_pte_group_flip_luma[k] = 0;
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
}
}
}
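/*
 * Per-plane GPUVM fetch time budgets.  When GPUVM is enabled and either DCC
 * is on or more than one page table level is in use, the lower VM stage is
 * split into vm_group_bytes-sized groups and 64-byte requests, and the
 * vblank/flip line budgets are spread evenly across those counts.  With more
 * than two page table levels the results are halved, presumably because two
 * lower stages are fetched back to back.
 */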
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
int num_group_per_lower_vm_stage = 0;
int num_req_per_lower_vm_stage = 0;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k])
/ (double) (vm_group_bytes[k]), 1) + dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k])
/ (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k])
/ (double) (vm_group_bytes[k]), 1);
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k])
/ (double) (vm_group_bytes[k]), 1) + dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k])
/ (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k])
/ (double) (vm_group_bytes[k]), 1);
}
} else {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = 2 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = 1 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
}
}
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64
+ meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ dpde0_bytes_per_frame_ub_c[k] / 64 + meta_pte_bytes_per_frame_ub_l[k]
/ 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ meta_pte_bytes_per_frame_ub_l[k] / 64;
}
}
}
TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k]
/ num_group_per_lower_vm_stage;
TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k]
/ num_group_per_lower_vm_stage;
TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k]
/ num_req_per_lower_vm_stage;
TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k]
/ num_req_per_lower_vm_stage;
if (GPUVMMaxPageTableLevels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
}
} else {
TimePerVMGroupVBlank[k] = 0;
TimePerVMGroupFlip[k] = 0;
TimePerVMRequestVBlank[k] = 0;
TimePerVMRequestFlip[k] = 0;
}
}
}
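/*
 * Stutter efficiency estimate.  The plane with the smallest full-DET
 * buffering time sets the stutter period; the refill burst is sized against
 * ROB capacity, the average DCC compression rate, return bandwidth and the
 * meta/dpte row fetch bandwidth.  Efficiency excluding vblank is
 * 1 - (SRExitTime + burst time) / stutter period (forced to zero while any
 * writeback is active); the overall number additionally credits the smallest
 * vblank of the critical frame as fully efficient.
 */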
static void CalculateStutterEfficiency(
int NumberOfActivePlanes,
long ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double SRExitTime,
bool SynchronizedVBlank,
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double DCCRateLuma[],
double DCCRateChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
double *StutterPeriodOut)
{
double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
double FrameTimeForMinFullDETBufferingTime = 0;
double StutterPeriod = 0;
double AverageReadBandwidth = 0;
double TotalRowReadBandwidth = 0;
double AverageDCCCompressionRate = 0;
double PartOfBurstThatFitsInROB = 0;
double StutterBurstTime = 0;
int TotalActiveWriteback = 0;
double VBlankTime = 0;
double SmallestVBlank = 0;
int BytePerPixelYCriticalPlane = 0;
double SwathWidthYCriticalPlane = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX] = { 0 };
double LinesToFinishSwathTransferStutterCriticalPlane = 0;
double MaximumEffectiveCompressionLuma = 0;
double MaximumEffectiveCompressionChroma = 0;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
LinesInDETY[k] = DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
}
StutterPeriod = FullDETBufferingTimeY[0];
FrameTimeForMinFullDETBufferingTime = VTotal[0] * HTotal[0] / PixelClock[0];
BytePerPixelYCriticalPlane = BytePerPixelY[0];
SwathWidthYCriticalPlane = SwathWidthY[0];
LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[0]
- (LinesInDETY[0] - LinesInDETYRoundedDownToSwath[0]);
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] < StutterPeriod) {
StutterPeriod = FullDETBufferingTimeY[k];
FrameTimeForMinFullDETBufferingTime = VTotal[k] * HTotal[k] / PixelClock[k];
BytePerPixelYCriticalPlane = BytePerPixelY[k];
SwathWidthYCriticalPlane = SwathWidthY[k];
LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[k]
- (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k]);
}
}
AverageReadBandwidth = 0;
TotalRowReadBandwidth = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesY[k] > SwathHeightY[k])
|| (SourceScan[k] != dm_vert
&& BlockHeight256BytesY[k] > SwathHeightY[k])
|| DCCYMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionLuma = 2;
} else {
MaximumEffectiveCompressionLuma = 4;
}
AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneLuma[k] / dml_min(DCCRateLuma[k], MaximumEffectiveCompressionLuma);
if (ReadBandwidthPlaneChroma[k] > 0) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesC[k] > SwathHeightC[k])
|| (SourceScan[k] != dm_vert && BlockHeight256BytesC[k] > SwathHeightC[k])
|| DCCCMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionChroma = 2;
} else {
MaximumEffectiveCompressionChroma = 4;
}
AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneChroma[k] / dml_min(DCCRateChroma[k], MaximumEffectiveCompressionChroma);
}
} else {
AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
}
TotalRowReadBandwidth = TotalRowReadBandwidth + DPPPerPlane[k] * (meta_row_bw[k] + dpte_row_bw[k]);
}
AverageDCCCompressionRate = TotalDataReadBandwidth / AverageReadBandwidth;
PartOfBurstThatFitsInROB = dml_min(StutterPeriod * TotalDataReadBandwidth, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate);
StutterBurstTime = PartOfBurstThatFitsInROB / AverageDCCCompressionRate / ReturnBW + (StutterPeriod * TotalDataReadBandwidth
- PartOfBurstThatFitsInROB) / (DCFCLK * 64) + StutterPeriod * TotalRowReadBandwidth / ReturnBW;
StutterBurstTime = dml_max(StutterBurstTime, LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
TotalActiveWriteback = TotalActiveWriteback + 1;
}
}
if (TotalActiveWriteback == 0) {
*StutterEfficiencyNotIncludingVBlank = (1
- (SRExitTime + StutterBurstTime) / StutterPeriod) * 100;
} else {
*StutterEfficiencyNotIncludingVBlank = 0;
}
if (SynchronizedVBlank == true || NumberOfActivePlanes == 1) {
SmallestVBlank = (VTotal[0] - VActive[0]) * HTotal[0] / PixelClock[0];
} else {
SmallestVBlank = 0;
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SynchronizedVBlank == true || NumberOfActivePlanes == 1) {
VBlankTime = (VTotal[k] - VActive[k]) * HTotal[k] / PixelClock[k];
} else {
VBlankTime = 0;
}
SmallestVBlank = dml_min(SmallestVBlank, VBlankTime);
}
*StutterEfficiency = (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
if (StutterPeriodOut)
*StutterPeriodOut = StutterPeriod;
}
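/*
 * Swath height selection and DET split.  Starting from the maximum swath
 * heights produced by CalculateSwathWidth(), per-format/tiling minimum
 * heights are derived and the largest luma/chroma combination that fits in
 * half of the DET buffer is chosen.  The DET is then handed entirely to
 * luma, split 50/50, or split 2/3 vs 1/3, and viewport size support is
 * flagged per plane.
 */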
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
int MaximumSwathHeightY[DC__NUM_DPP__MAX] = { 0 };
int MaximumSwathHeightC[DC__NUM_DPP__MAX] = { 0 };
int MinimumSwathHeightY = 0;
int MinimumSwathHeightC = 0;
long RoundedUpMaxSwathSizeBytesY = 0;
long RoundedUpMaxSwathSizeBytesC = 0;
long RoundedUpMinSwathSizeBytesY = 0;
long RoundedUpMinSwathSizeBytesC = 0;
long RoundedUpSwathSizeBytesY = 0;
long RoundedUpSwathSizeBytesC = 0;
double SwathWidthSingleDPP[DC__NUM_DPP__MAX] = { 0 };
double SwathWidthSingleDPPChroma[DC__NUM_DPP__MAX] = { 0 };
int k;
CalculateSwathWidth(
ForceSingleDPP,
NumberOfActivePlanes,
SourcePixelFormat,
SourceScan,
ViewportWidth,
ViewportHeight,
SurfaceWidthY,
SurfaceWidthC,
SurfaceHeightY,
SurfaceHeightC,
ODMCombineEnabled,
BytePerPixY,
BytePerPixC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
BlendingAndTiming,
HActive,
HRatio,
DPPPerPlane,
SwathWidthSingleDPP,
SwathWidthSingleDPPChroma,
SwathWidth,
SwathWidthChroma,
MaximumSwathHeightY,
MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32
|| SourcePixelFormat[k] == dm_444_16
|| SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8
|| SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear
|| (SourcePixelFormat[k] == dm_444_64
&& (SurfaceTiling[k] == dm_sw_64kb_s || SurfaceTiling[k] == dm_sw_64kb_s_t || SurfaceTiling[k] == dm_sw_64kb_s_x)
&& SourceScan[k] != dm_vert)) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else if (SourcePixelFormat[k] == dm_444_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else {
if (SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha
&& SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else if (SourcePixelFormat[k] == dm_420_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else {
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
}
RoundedUpMaxSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k]
* MaximumSwathHeightY[k];
RoundedUpMinSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k]
* MinimumSwathHeightY;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil((double) RoundedUpMaxSwathSizeBytesY, 256);
RoundedUpMinSwathSizeBytesY = dml_ceil((double) RoundedUpMinSwathSizeBytesY, 256);
}
RoundedUpMaxSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k]
* MaximumSwathHeightC[k];
RoundedUpMinSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k]
* MinimumSwathHeightC;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(RoundedUpMaxSwathSizeBytesC, 256);
RoundedUpMinSwathSizeBytesC = dml_ceil(RoundedUpMinSwathSizeBytesC, 256);
}
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
<= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY >= 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMinSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
<= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY < 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMaxSwathSizeBytesY + RoundedUpMinSwathSizeBytesC
<= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
} else {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
}
if (SwathHeightC[k] == 0) {
DETBufferSizeY[k] = DETBufferSizeInKByte * 1024;
DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
DETBufferSizeY[k] = DETBufferSizeInKByte * 1024 / 2;
DETBufferSizeC[k] = DETBufferSizeInKByte * 1024 / 2;
} else {
DETBufferSizeY[k] = DETBufferSizeInKByte * 1024 * 2 / 3;
DETBufferSizeC[k] = DETBufferSizeInKByte * 1024 / 3;
}
if (RoundedUpMinSwathSizeBytesY + RoundedUpMinSwathSizeBytesC
> DETBufferSizeInKByte * 1024 / 2
|| SwathWidth[k] > MaximumSwathWidthLuma[k]
|| (SwathHeightC[k] > 0
&& SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
ViewportSizeSupportPerPlane[k] = false;
} else {
ViewportSizeSupportPerPlane[k] = true;
}
}
}
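/*
 * Swath width calculation.  The single-DPP swath width is the viewport width
 * (viewport height for vertical scans), reduced for ODM combine or a 2-DPP
 * split; chroma widths are halved for 4:2:0 formats.  The *_ub outputs round
 * the widths up to whole 256-byte request blocks, clamped to the rounded-up
 * surface dimensions, and the maximum swath heights are the 256B block
 * heights (block widths for vertical scans).
 */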
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
unsigned int ViewportWidth[],
unsigned int ViewportHeight[],
unsigned int SurfaceWidthY[],
unsigned int SurfaceWidthC[],
unsigned int SurfaceHeightY[],
unsigned int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
unsigned int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[])
{
unsigned int k, j;
long surface_width_ub_l;
long surface_height_ub_l;
long surface_width_ub_c;
long surface_height_ub_c;
for (k = 0; k < NumberOfActivePlanes; ++k) {
enum odm_combine_mode MainPlaneODMCombine = 0;
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
} else {
SwathWidthSingleDPPY[k] = ViewportHeight[k];
}
MainPlaneODMCombine = ODMCombineEnabled[k];
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
MainPlaneODMCombine = ODMCombineEnabled[j];
}
}
if (MainPlaneODMCombine == dm_odm_combine_mode_4to1) {
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 4.0 * HRatio[k]));
} else if (MainPlaneODMCombine == dm_odm_combine_mode_2to1) {
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 2.0 * HRatio[k]));
} else if (DPPPerPlane[k] == 2) {
SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
} else {
SwathWidthY[k] = SwathWidthSingleDPPY[k];
}
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 || SourcePixelFormat[k] == dm_420_12) {
SwathWidthC[k] = SwathWidthY[k] / 2;
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
} else {
SwathWidthC[k] = SwathWidthY[k];
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
}
if (ForceSingleDPP == true) {
SwathWidthY[k] = SwathWidthSingleDPPY[k];
SwathWidthC[k] = SwathWidthSingleDPPC[k];
}
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
} else {
MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
}
}
}
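/*
 * Extra latency is the round-trip ping time (plus a fixed 32-cycle pad) at
 * DCFCLK plus the time to return the extra latency bytes at the available
 * return bandwidth.
 */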
static double CalculateExtraLatency(
long RoundTripPingLatencyCycles,
long ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ExtraLatencyBytes = 0;
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
TotalNumberOfActiveDPP,
PixelChunkSizeInKByte,
TotalNumberOfDCCActiveDPP,
MetaChunkSize,
GPUVMEnable,
HostVMEnable,
NumberOfActivePlanes,
NumberOfDPP,
dpte_group_bytes,
PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
HostVMMinPageSize,
HostVMMaxNonCachedPageTableLevels);
return (RoundTripPingLatencyCycles + 32) / DCFCLK + ExtraLatencyBytes / ReturnBW;
}
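/*
 * Byte count behind the extra latency term: the reordering bytes, one pixel
 * chunk per active DPP and one meta chunk per DCC-active DPP, plus (with
 * GPUVM enabled) the per-plane dpte group bytes inflated by the host VM
 * inefficiency factor and the number of non-cached host VM levels.
 */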
static double CalculateExtraLatencyBytes(
long ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ret = 0;
double HostVMInefficiencyFactor = 0;
int HostVMDynamicLevels = 0;
unsigned int k;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
if (HostVMMinPageSize < 2048) {
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
} else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
} else {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevels = 0;
}
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k) {
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
}
return ret;
}
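/*
 * Urgent latency is the worst case of the pixel-only, pixel-mixed-with-VM
 * and VM-only latencies, optionally adjusted by a fabric clock dependent
 * component scaled by how far FabricClock sits below its reference.
 */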
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClock)
{
double ret;
ret = dml_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
if (DoUrgentLatencyAdjustment == true) {
ret = ret + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
}
return ret;
}
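/*
 * UseMinimumDCFCLK: for every voltage state i and configuration index j,
 * estimate the smallest DCFCLK that still meets both the average bandwidth
 * and the per-plane peak prefetch bandwidth (including dynamic metadata and
 * immediate flip constraints), then take the smaller of the per-state
 * maximum and that requirement padded by 5% plus the configured margin.
 * NormalEfficiency and PTEEfficiency fold in the host VM bandwidth derating.
 */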
static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
struct vba_vars_st *v,
int MaxPrefetchMode,
int ReorderingBytes)
{
double NormalEfficiency = 0;
double PTEEfficiency = 0;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2] = { { 0 } };
unsigned int i, j, k;
NormalEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
: v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
PTEEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
/ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX] = { 0 };
double PrefetchPixelLinesTime[DC__NUM_DPP__MAX] = { 0 };
double DCFCLKRequiredForPeakBandwidthPerPlane[DC__NUM_DPP__MAX] = { 0 };
double DynamicMetadataVMExtraLatency[DC__NUM_DPP__MAX] = { 0 };
double MinimumTWait = 0;
double NonDPTEBandwidth = 0;
double DPTEBandwidth = 0;
double DCFCLKRequiredForAverageBandwidth = 0;
double ExtraLatencyBytes = 0;
double ExtraLatencyCycles = 0;
double DCFCLKRequiredForPeakBandwidth = 0;
int NoOfDPPState[DC__NUM_DPP__MAX] = { 0 };
double MinimumTvmPlus2Tr0 = 0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
+ v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max3(v->ProjectedDCFCLKDeepSleep[i][j],
(NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth / (v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
(NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / v->ReturnBusWidth);
ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, v->TotalNumberOfActiveDPP[i][j], v->PixelChunkSizeInKByte, v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize, v->GPUVMEnable, v->HostVMEnable, v->NumberOfActivePlanes, NoOfDPPState, v->dpte_group_bytes,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
v->HostVMMinPageSize, v->HostVMMaxNonCachedPageTableLevels);
ExtraLatencyCycles = v->RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch = { 0 };
double ExpectedPrefetchBWAcceleration = { 0 };
double PrefetchTime = { 0 };
PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
/ NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * v->DPTEBytesPerRow[i][j][k] / PTEEfficiency
/ NormalEfficiency / v->ReturnBusWidth + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
DynamicMetadataVMExtraLatency[k] = (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - v->UrgLatency[i] * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels
: v->GPUVMMaxPageTableLevels - 2) * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch = { 0 };
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
+ NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
if (v->DynamicMetadataEnable[k] == true) {
double TsetupPipe = { 0 };
double TdmbfPipe = { 0 };
double TdmsksPipe = { 0 };
double TdmecPipe = { 0 };
double AllowedTimeForUrgentExtraLatency = { 0 };
CalculateDynamicMetadataParameters(
v->MaxInterDCNTileRepeaters,
v->RequiredDPPCLK[i][j][k],
v->RequiredDISPCLK[i][j],
v->ProjectedDCFCLKDeepSleep[i][j],
v->PixelClock[k],
v->HTotal[k],
v->VTotal[k] - v->VActive[k],
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
&TsetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe);
AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TsetupPipe
- TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
MinimumTvmPlus2Tr0 = v->UrgLatency[i] * (v->GPUVMEnable == true ? (v->HostVMEnable == true ?
(v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw = { 0 };
MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth, 2 * ExtraLatencyCycles
/ (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
* dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_30.h"
#include "display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
	bool ret_val = false;
	if ((source_format == dm_420_12) || (source_format == dm_420_8) || (source_format == dm_420_10) || (source_format == dm_rgbe_alpha))
		ret_val = true;
return ret_val;
}
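/*
 * Refclk cycles per request delivery.  For vratio <= 1 the delivery window
 * is paced by the recout width (per ODM segment when combined) at the pixel
 * clock; for vratio > 1 it is paced by the scaler's horizontal pixel rate
 * over the delivery width.  Either way the window is divided by the number
 * of requests per swath.
 */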
static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
unsigned int odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)((unsigned int)odm_combine*2)
* dml_min((double)recout_width, (double)hactive / ((unsigned int)odm_combine*2))
/ pclk_freq_in_mhz / (double)req_per_swath_ub;
else
refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)recout_width
/ pclk_freq_in_mhz / (double)req_per_swath_ub;
} else {
refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)delivery_width
/ (double)hscale_pixel_rate / (double)req_per_swath_ub;
}
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
return refcyc_per_delivery;
}
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
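/*
 * Pack the RQ sizing parameters into their register fields.  Each size is
 * encoded as a log2 value minus a per-field offset, with a zero-byte
 * minimum chunk encoded as 0.
 */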
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
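/*
 * Fill the full RQ register set from the computed rq_param: per-plane sizing
 * and linear PTE row height, swath heights, the request expansion modes, and
 * the detile buffer plane-1 base address, which places chroma at the half or
 * two-thirds point of the buffer depending on the stored swath byte ratio.
 */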
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
	// okay for now, as we are setting chunk_bytes to 8kb anyway
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
if (rq_param->yuv420) {
if ((double)rq_param->misc.rq_l.stored_swath_bytes
/ (double)rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0),
256,
0) / 64.0; // 2/3 to chroma
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
}
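/*
 * Decide whether luma and/or chroma fall back to 128-byte requests so that
 * two swaths per plane fit in the detile buffer.  YUV420 10bpc swaths are
 * first re-packed to 2/3 of their size (plus a 256B pad).  A 128B request
 * halves the stored swath and, via the log2 bookkeeping at the end, the
 * programmed swath height.
 */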
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2.0 / 3.0,
256,
1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2.0 / 3.0,
256,
1) + 256;
}
if (rq_param->yuv420)
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
else
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = false;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else if (!rq_param->yuv420) {
req128_l = true;
req128_c = false;
swath_bytes_c = full_swath_bytes_packed_c;
swath_bytes_l = full_swath_bytes_packed_l / 2;
} else if ((double)full_swath_bytes_packed_l / (double)full_swath_bytes_packed_c < 1.5) {
req128_l = false;
req128_c = true;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c / 2;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_l = true;
swath_bytes_l = full_swath_bytes_packed_l / 2;
}
} else {
req128_l = true;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l/2;
swath_bytes_c = full_swath_bytes_packed_c;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_c = true;
swath_bytes_c = full_swath_bytes_packed_c/2;
}
}
if (rq_param->yuv420)
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
else
total_swath_bytes = 2 * swath_bytes_l;
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
__func__,
full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
__func__,
full_swath_bytes_packed_c);
}
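/*
 * Derive the meta (DCC) and dpte request/row geometry for one surface from
 * its viewport, pitch, format, tiling and scan direction.  Most of the math
 * is done in log2 space; see the in-line notes on the 64B meta request and
 * on the 8x1/4x2/2x4 dpte request shapes.
 */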
static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int hostvm_enable,
unsigned int is_chroma,
unsigned int surface_height)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element = 0;
unsigned int bytes_per_element_y = 0;
unsigned int bytes_per_element_c = 0;
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element = 0;
unsigned int log2_blk256_width = 0;
unsigned int log2_blk256_height = 0;
unsigned int blk_bytes = 0;
unsigned int log2_blk_bytes = 0;
unsigned int log2_blk_height = 0;
unsigned int log2_blk_width = 0;
unsigned int log2_meta_req_bytes = 0;
unsigned int log2_meta_req_height = 0;
unsigned int log2_meta_req_width = 0;
unsigned int meta_req_width = 0;
unsigned int meta_req_height = 0;
unsigned int log2_meta_row_height = 0;
unsigned int meta_row_width_ub = 0;
unsigned int log2_meta_chunk_bytes = 0;
unsigned int log2_meta_chunk_height = 0;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width = 0;
unsigned int log2_min_meta_chunk_bytes = 0;
unsigned int min_meta_chunk_width = 0;
unsigned int meta_chunk_width = 0;
unsigned int meta_chunk_per_row_int = 0;
unsigned int meta_row_remainder = 0;
unsigned int meta_chunk_threshold = 0;
unsigned int meta_blk_bytes = 0;
unsigned int meta_blk_height = 0;
unsigned int meta_blk_width = 0;
unsigned int meta_surface_bytes = 0;
unsigned int vmpg_bytes = 0;
unsigned int meta_pte_req_per_frame_ub = 0;
unsigned int meta_pte_bytes_per_frame_ub = 0;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.gpuvm_min_page_size_bytes);
const bool dual_plane_en = is_dual_plane((enum source_format_class)(source_format));
const unsigned int dpte_buf_in_pte_reqs = dual_plane_en ?
(is_chroma ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma)
: (mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma + mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
double byte_per_pixel_det_y = 0;
double byte_per_pixel_det_c = 0;
dml30_CalculateBytePerPixelAnd256BBlockSizes((enum source_format_class)(source_format),
(enum dm_swizzle_mode)(tiling),
&bytes_per_element_y,
&bytes_per_element_c,
&byte_per_pixel_det_y,
&byte_per_pixel_det_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double)blk256_width);
log2_blk256_height = dml_log2((double)blk256_height);
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double)blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
	// remember the log2 rules:
	// "+" in log space is multiply
	// "-" in log space is divide
	// "/2" in log space is square root
	// blk is vertically biased
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double)(log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
if (!surf_vert) {
int unsigned temp = 0;
temp = dml_round_to_multiple(vp_width - 1, blk256_width, 1) + blk256_width;
if (data_pitch < blk256_width) {
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < blk256_width=%u\n", __func__, data_pitch, blk256_width);
} else {
if (temp > data_pitch) {
if (data_pitch >= vp_width)
temp = data_pitch;
else
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < vp_width=%u\n", __func__, data_pitch, vp_width);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_width;
} else {
int unsigned temp = 0;
temp = dml_round_to_multiple(vp_height - 1, blk256_height, 1) + blk256_height;
if (surface_height < blk256_height) {
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < blk256_height=%u\n", __func__, surface_height, blk256_height);
} else {
if (temp > surface_height) {
if (surface_height >= vp_height)
temp = surface_height;
else
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < vp_height=%u\n", __func__, surface_height, vp_height);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
* bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
* bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
// each 64b meta request for dcn is 8x8 meta elements and
// a meta element covers one 256b block of the data surface.
log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1
<< (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int)(meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_bytes = 4096;
meta_blk_height = blk256_height * 64;
meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
meta_surface_bytes = meta_pitch
* (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
* bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.gpuvm_min_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
8 * vmpg_bytes,
1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
__func__,
meta_pte_req_per_frame_ub);
dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
__func__,
meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear) {
log2_vmpg_height = 0; // one line high
} else {
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
}
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
// The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
// log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
if (surf_linear) {
unsigned int dpte_row_height = 0;
log2_dpte_row_height_linear = dml_floor(dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch), 1);
dml_print("DML_DLG: %s: is_chroma = %d\n", __func__, is_chroma);
dml_print("DML_DLG: %s: dpte_buf_in_pte_reqs = %d\n", __func__, dpte_buf_in_pte_reqs);
dml_print("DML_DLG: %s: log2_dpte_row_height_linear = %d\n", __func__, log2_dpte_row_height_linear);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
dpte_req_width,
1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
		// the upper bound of the dpte_row_width without dependency on viewport position follows.
		// for tiled mode, the row height equals the request height and a row stores up to the viewport-size upper bound
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
		if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
}
//since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double)dpte_row_width_ub / dpte_group_width,
1);
}
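/*
 * Derive the per-surface (luma vs. chroma/alpha) viewport, pitch and sizing
 * parameters from the pipe params, clamp the viewport for ODM combine, pick
 * the hard-coded chunk sizes, then hand off to get_meta_and_pte_attr() for
 * the meta and PTE attribute calculation.
 */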
static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
unsigned int ppe = mode_422 ? 2 : 1;
// FIXME: check whether ppe applies to both luma and chroma in the 422 case
if (is_chroma || is_alpha) {
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
surface_height = pipe_param->src.surface_height_y;
}
if (pipe_param->dest.odm_combine) {
unsigned int access_dir = 0;
unsigned int full_src_vp_width = 0;
unsigned int hactive_odm = 0;
unsigned int src_hactive_odm = 0;
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2);
if (is_chroma) {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
vp_width = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
} else {
vp_height = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
}
dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
dml_print("DML_DLG: %s: hactive_odm = %d\n", __func__, hactive_odm);
dml_print("DML_DLG: %s: src_hactive_odm = %d\n", __func__, src_hactive_odm);
}
rq_sizing_param->chunk_bytes = 8192;
if (is_alpha) {
rq_sizing_param->chunk_bytes = 4096;
}
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma,
surface_height);
}
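/*
 * Build the complete RQ parameter set for a pipe: luma surface always,
 * chroma/alpha surface only for dual-plane formats, then split the DET
 * buffer between the planes.
 */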
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
|| pipe_param->src.source_format == dm_420_10
|| pipe_param->src.source_format == dm_rgbe_alpha
|| pipe_param->src.source_format == dm_420_12;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
&(rq_param->dlg.rq_l),
&(rq_param->misc.rq_l),
pipe_param,
0,
0);
if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
&(rq_param->dlg.rq_c),
&(rq_param->misc.rq_c),
pipe_param,
1,
rq_param->rgbe_alpha);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
print__rq_params_st(mode_lib, rq_param);
}
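/*
 * Entry point: compute the RQ parameters for one pipe and pack them into
 * the RQ register structure.
 */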
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = { 0 };
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
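/*
 * TTU values for a cursor plane: derive the request size from the cursor
 * width and bpp, then compute refclk cycles per request for the prefetch
 * and active phases. A zero cursor width leaves both outputs at zero.
 */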
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
cur_req_width = (double)cur_req_size / ((double)cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double)cur_src_width / (double)cur_req_width, 1)
* (double)cur_req_width;
cur_req_per_width = cur_width_ub / (double)cur_req_width;
hactive_cur = (double)cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
/ (double)cur_req_per_width;
} else {
*refcyc_per_req_delivery_pre_cur = (double)refclk_freq_in_mhz
* (double)cur_src_width / hscale_pixel_rate_l
/ (double)cur_req_per_width;
}
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0) {
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
/ (double)cur_req_per_width;
} else {
*refcyc_per_req_delivery_cur = (double)refclk_freq_in_mhz
* (double)cur_src_width / hscale_pixel_rate_l
/ (double)cur_req_per_width;
}
dml_print("DML_DLG: %s: cur_req_width = %d\n",
__func__,
cur_req_width);
dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
__func__,
cur_width_ub);
dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
__func__,
cur_req_per_width);
dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
__func__,
hactive_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_pre_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
// Note: currently taken in as-is.
// It would be nice to decouple this code from the HW register implementation and factor out the logic repeated for luma and chroma.
static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st rq_dlg_param,
const display_dlg_sys_params_st dlg_sys_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double dispclk_freq_in_mhz = clks->dispclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_dcfclk_mhz = 0;
double t_calc_us = 0;
double min_ttu_vblank = 0;
double min_dst_y_ttu_vblank = 0;
unsigned int dlg_vblank_start = 0;
bool dual_plane = false;
bool mode_422 = false;
unsigned int access_dir = 0;
unsigned int vp_height_l = 0;
unsigned int vp_width_l = 0;
unsigned int vp_height_c = 0;
unsigned int vp_width_c = 0;
// Scaling
unsigned int htaps_l = 0;
unsigned int htaps_c = 0;
double hratio_l = 0;
double hratio_c = 0;
double vratio_l = 0;
double vratio_c = 0;
bool scl_enable = false;
double line_time_in_us = 0;
// double vinit_l;
// double vinit_c;
// double vinit_bot_l;
// double vinit_bot_c;
// unsigned int swath_height_l;
unsigned int swath_width_ub_l = 0;
// unsigned int dpte_bytes_per_row_ub_l;
unsigned int dpte_groups_per_row_ub_l = 0;
// unsigned int meta_pte_bytes_per_frame_ub_l;
// unsigned int meta_bytes_per_row_ub_l;
// unsigned int swath_height_c;
unsigned int swath_width_ub_c = 0;
// unsigned int dpte_bytes_per_row_ub_c;
unsigned int dpte_groups_per_row_ub_c = 0;
unsigned int meta_chunks_per_row_ub_l = 0;
unsigned int meta_chunks_per_row_ub_c = 0;
unsigned int vupdate_offset = 0;
unsigned int vupdate_width = 0;
unsigned int vready_offset = 0;
unsigned int dppclk_delay_subtotal = 0;
unsigned int dispclk_delay_subtotal = 0;
unsigned int pixel_rate_delay_subtotal = 0;
unsigned int vstartup_start = 0;
unsigned int dst_x_after_scaler = 0;
int dst_y_after_scaler = 0;
double line_wait = 0;
double dst_y_prefetch = 0;
double dst_y_per_vm_vblank = 0;
double dst_y_per_row_vblank = 0;
double dst_y_per_vm_flip = 0;
double dst_y_per_row_flip = 0;
double max_dst_y_per_vm_vblank = 0;
double max_dst_y_per_row_vblank = 0;
double lsw = 0;
double vratio_pre_l = 0;
double vratio_pre_c = 0;
unsigned int req_per_swath_ub_l = 0;
unsigned int req_per_swath_ub_c = 0;
unsigned int meta_row_height_l = 0;
unsigned int meta_row_height_c = 0;
unsigned int swath_width_pixels_ub_l = 0;
unsigned int swath_width_pixels_ub_c = 0;
unsigned int scaler_rec_in_width_l = 0;
unsigned int scaler_rec_in_width_c = 0;
unsigned int dpte_row_height_l = 0;
unsigned int dpte_row_height_c = 0;
double hscale_pixel_rate_l = 0;
double hscale_pixel_rate_c = 0;
double min_hratio_fact_l = 0;
double min_hratio_fact_c = 0;
double refcyc_per_line_delivery_pre_l = 0;
double refcyc_per_line_delivery_pre_c = 0;
double refcyc_per_line_delivery_l = 0;
double refcyc_per_line_delivery_c = 0;
double refcyc_per_req_delivery_pre_l = 0;
double refcyc_per_req_delivery_pre_c = 0;
double refcyc_per_req_delivery_l = 0;
double refcyc_per_req_delivery_c = 0;
unsigned int full_recout_width = 0;
double refcyc_per_req_delivery_pre_cur0 = 0;
double refcyc_per_req_delivery_cur0 = 0;
double refcyc_per_req_delivery_pre_cur1 = 0;
double refcyc_per_req_delivery_cur1 = 0;
unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX] = { 0 };
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq =
(unsigned int)(ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(ref_freq_to_pix_freq * (double)htotal
* dml_pow(2, 8));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double)htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)dlg_vblank_start * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
__func__,
min_dcfclk_mhz);
dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
__func__,
min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
__func__,
min_dst_y_ttu_vblank);
dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
__func__,
t_calc_us);
dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
__func__,
disp_dlg_regs->min_dst_y_next_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
__func__,
ref_freq_to_pix_freq);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
if (scl_enable)
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
else
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
if (dout->dsc_enable) {
double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dispclk_delay_subtotal += dsc_delay;
}
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0
- (double)(vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start
- (double)(vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
// TODO: Where is this coming from?
if (interlaced)
vstartup_start = vstartup_start / 2;
// TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
if (vstartup_start >= min_vblank) {
dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
__func__,
vblank_start,
vblank_end);
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
min_vblank = vstartup_start + 1;
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
}
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
if (dst_y_after_scaler < 0)
dst_y_after_scaler = 0;
// adjust dst_x/y_after_scaler to account for ODM combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n",
__func__,
dst_x_after_scaler);
dml_print("DML_DLG: %s: input dst_y_after_scaler = %d\n",
__func__,
dst_y_after_scaler);
// need to figure out which side of odm combine we're in
if (dst->odm_combine) {
// figure out which pipes go together
bool visited[DC__NUM_PIPES__MAX] = { false };
unsigned int i, j, k;
for (k = 0; k < num_pipes; ++k) {
visited[k] = false;
pipe_index_in_combine[k] = 0;
}
for (i = 0; i < num_pipes; i++) {
if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
unsigned int grp_idx = 0;
for (j = i; j < num_pipes; j++) {
if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
&& e2e_pipe_param[j].pipe.src.is_hsplit && !visited[j]) {
pipe_index_in_combine[j] = grp_idx;
dml_print("DML_DLG: %s: pipe[%d] is in grp %d idx %d\n", __func__, j, grp, grp_idx);
grp_idx++;
visited[j] = true;
}
}
}
}
}
if (dst->odm_combine == dm_odm_combine_mode_disabled) {
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)((double) hblank_end * ref_freq_to_pix_freq);
} else {
unsigned int odm_combine_factor = (dst->odm_combine == dm_odm_combine_mode_2to1 ? 2 : 4); // TODO: We should really check that 4to1 is supported before setting it to 4
unsigned int odm_pipe_index = pipe_index_in_combine[pipe_idx];
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double) hblank_end + odm_pipe_index * (double) dst->hactive / odm_combine_factor) * ref_freq_to_pix_freq);
}
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
__func__,
pixel_rate_delay_subtotal);
dml_print("DML_DLG: %s: dst_x_after_scaler[%d] = %d\n",
__func__,
pipe_idx,
dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler[%d] = %d\n",
__func__,
pipe_idx,
dst_y_after_scaler);
// Lwait
// TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
line_wait = mode_lib->soc.urgent_latency_pixel_data_only_us;
if (cstate_en)
line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
if (pstate_en)
line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ mode_lib->soc.urgent_latency_pixel_data_only_us, // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
line_wait);
line_wait = line_wait / line_time_in_us;
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
max_dst_y_per_vm_vblank = 32.0; //U5.2
max_dst_y_per_row_vblank = 16.0; //U4.2
// magic: special case for very short lines (htotal <= 75); relax the vblank and prefetch bounds
if (htotal <= 75) {
min_vblank = 300;
max_dst_y_per_vm_vblank = 100.0;
max_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// In ODM
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
__func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n",
__func__,
full_recout_width);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
__func__,
hscale_pixel_rate_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_l);
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_c);
}
// smehta: this is a hack added until we get the real dml, sorry, need to make progress
if (src->dynamic_metadata_enable && src->gpuvm) {
unsigned int levels = mode_lib->ip.gpuvm_max_page_table_levels;
double ref_cycles;
if (src->hostvm)
levels = levels * (mode_lib->ip.hostvm_max_page_table_levels+1);
ref_cycles = (levels * mode_lib->soc.urgent_latency_vm_data_only_us) * refclk_freq_in_mhz;
dml_print("BENyamin: dst_y_prefetch = %f %d %f %f \n",
ref_cycles, levels, mode_lib->soc.urgent_latency_vm_data_only_us, refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int) ref_cycles;
}
dml_print("BENyamin: dmdta_en vm = %d %d \n",
src->dynamic_metadata_enable, src->vm);
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
if (src->num_cursors > 0) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp)(src->cur0_bpp));
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
if (src->num_cursors > 1) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur1,
&refcyc_per_req_delivery_cur1,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur1_src_width,
(enum cursor_bpp)(src->cur1_bpp));
}
// TTU - Misc
// all hard-coded
// Assignment to register structures
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
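// Convert the floating point DLG values into the register fixed-point
// formats: the dst_y_* fields carry 2 fractional bits (dml_pow(2, 2)),
// the vratio_prefetch fields carry 19 fractional bits.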
disp_dlg_regs->dst_y_prefetch = (unsigned int)(dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int)(dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int)(dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int)(dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int)(dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int)(vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(vratio_pre_c * dml_pow(2, 19));
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int)(dst_y_per_row_vblank * (double)htotal
* ref_freq_to_pix_freq / (double)dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int)(dst_y_per_row_vblank
* (double)htotal * ref_freq_to_pix_freq
/ (double)dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int)dml_pow(2, 13));
}
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int)(dst_y_per_row_vblank * (double)htotal
* ref_freq_to_pix_freq / (double)meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int)(dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int)(dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int)(dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int)(dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int)((double)dpte_row_height_l
/ (double)vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int)((double)dpte_row_height_c
/ (double)vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)dml_pow(2, 17)) {
dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int)dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int)((double)meta_row_height_l
/ (double)vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
dml_print("DML: Trow: %fus\n", line_time_in_us * (double)dpte_row_height_l / (double)vratio_l);
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)((double)dpte_row_height_l
/ (double)vratio_l * (double)htotal * ref_freq_to_pix_freq
/ (double)dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int)((double)meta_row_height_l
/ (double)vratio_l * (double)htotal * ref_freq_to_pix_freq
/ (double)meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c =
(unsigned int)((double)dpte_row_height_c / (double)vratio_c
* (double)htotal * ref_freq_to_pix_freq
/ (double)dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
(unsigned int)((double)meta_row_height_c / (double)vratio_c
* (double)htotal * ref_freq_to_pix_freq
/ (double)meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
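// Line delivery times are programmed as whole refclk cycles (floor);
// the per-request delivery times below use 10 fractional bits.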
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int)dml_floor(refcyc_per_line_delivery_pre_l,
1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int)dml_floor(refcyc_per_line_delivery_l,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int)dml_floor(refcyc_per_line_delivery_pre_c,
1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int)dml_floor(refcyc_per_line_delivery_c,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int)(refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int)(refcyc_per_req_delivery_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int)(refcyc_per_req_delivery_pre_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int)(refcyc_per_req_delivery_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int)(refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int)(refcyc_per_req_delivery_cur0
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
(unsigned int)(refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int)(refcyc_per_req_delivery_cur1
* dml_pow(2, 10));
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int)(4.0 * (double)htotal
* ref_freq_to_pix_freq);
ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
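/*
 * Entry point: gather the system-level DLG parameters (watermarks,
 * deep-sleep DCFCLK, immediate flip bandwidth), recompute the RQ params for
 * this pipe, then fill the DLG and TTU register structures.
 */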
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = { 0 };
display_dlg_sys_params_st dlg_sys_param = { 0 };
// Get watermark and Tex.
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
rq_param.dlg,
dlg_sys_param,
cstate_en,
pstate_en,
vm_en,
ignore_viewport_pos,
immediate_flip_support);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c |
/*
* Copyright 2020-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "dcn_calc_math.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
#include "clk_mgr/dcn30/dcn30_smu11_driver_if.h"
#include "display_mode_vba_30.h"
#include "dcn30_fpu.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
struct _vcs_dpi_ip_params_st dcn3_0_ip = {
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 6,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 84,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 2,
.pte_chunk_size_kbytes = 2, // ?
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 0, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 6,
.max_num_dpp = 6,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 1,
.odm_combine_4to1_supported = true,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 562.0,
.dppclk_mhz = 300.0,
.phyclk_mhz = 300.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 405.6,
},
},
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 15.5,
.sr_enter_plus_exit_time_us = 20,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 191,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3650,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};
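/*
 * Program the OTG for constant-M VRR: choose VTOTAL_MIN/MAX around the
 * requested average vtotal and set the M_CONST DTO phase/modulo so the
 * average frame rate is achieved. Must run with FPU access enabled
 * (dc_assert_fp_enabled).
 */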
void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
double vtotal_avg)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
double vtotal_min, vtotal_max;
double ratio, modulo, phase;
uint32_t vblank_start;
uint32_t v_total_mask_value = 0;
dc_assert_fp_enabled();
/* Compute VTOTAL_MIN and VTOTAL_MAX, so that
* VTOTAL_MAX - VTOTAL_MIN = 1
*/
v_total_mask_value = 16;
vtotal_min = dcn_bw_floor(vtotal_avg);
vtotal_max = dcn_bw_ceil(vtotal_avg);
/* Check that bottom VBLANK is at least 2 lines tall when running with
* VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
* of lines in a frame - 1'.
*/
REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
&vblank_start);
ASSERT(vtotal_min >= vblank_start + 1);
/* Special case where the average frame rate can be achieved
* without using the DTO
*/
if (vtotal_min == vtotal_max) {
REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
optc->funcs->set_vtotal_min_max(optc, 0, 0);
REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 0,
OTG_V_TOTAL_MAX_SEL, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
return;
}
ratio = vtotal_max - vtotal_avg;
modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
phase = ratio * modulo;
/* Special cases where the DTO phase gets rounded to 0 or
* to DTO modulo
*/
if (phase <= 0 || phase >= modulo) {
REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
phase <= 0 ?
(uint32_t)vtotal_max : (uint32_t)vtotal_min);
REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 0,
OTG_V_TOTAL_MAX_SEL, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
return;
}
REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
OTG_V_TOTAL_MAX_SEL, 1,
OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
}
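/*
 * Fill the DML writeback parameters for each active stream. When several
 * writebacks share a plane, keep the parameter set that needs the highest
 * writeback DISPCLK (worst case), since DML models only one writeback per
 * pipe.
 */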
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
struct writeback_st dout_wb;
dc_assert_fp_enabled();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
if (!stream)
continue;
max_calc_writeback_dispclk = 0;
/* Set writeback information */
pipes[pipe_cnt].dout.wb_enable = 0;
pipes[pipe_cnt].dout.num_active_wb = 0;
for (j = 0; j < stream->num_wb_info; j++) {
struct dc_writeback_info *wb_info = &stream->writeback_info[j];
if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
(wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
pipes[pipe_cnt].dout.wb_enable = 1;
pipes[pipe_cnt].dout.num_active_wb++;
dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
wb_info->dwb_params.cnv_params.crop_height :
wb_info->dwb_params.cnv_params.src_height;
dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
wb_info->dwb_params.cnv_params.crop_width :
wb_info->dwb_params.cnv_params.src_width;
dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
/* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
if (dc->dml.ip.writeback_max_hscl_taps > 1) {
dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
} else {
dout_wb.wb_htaps_luma = 1;
dout_wb.wb_vtaps_luma = 1;
}
dout_wb.wb_htaps_chroma = 0;
dout_wb.wb_vtaps_chroma = 0;
dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
(double)wb_info->dwb_params.cnv_params.crop_width /
(double)wb_info->dwb_params.dest_width :
(double)wb_info->dwb_params.cnv_params.src_width /
(double)wb_info->dwb_params.dest_width;
dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
(double)wb_info->dwb_params.cnv_params.crop_height /
(double)wb_info->dwb_params.dest_height :
(double)wb_info->dwb_params.cnv_params.src_height /
(double)wb_info->dwb_params.dest_height;
if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
dout_wb.wb_pixel_format = dm_444_64;
else
dout_wb.wb_pixel_format = dm_444_32;
/* Workaround for cases where multiple writebacks are connected to same plane
* In which case, need to compute worst case and set the associated writeback parameters
* This workaround is necessary due to DML computation assuming only 1 set of writeback
* parameters per pipe
*/
writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
dout_wb.wb_pixel_format,
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
dout_wb.wb_hratio,
dout_wb.wb_vratio,
dout_wb.wb_htaps_luma,
dout_wb.wb_vtaps_luma,
dout_wb.wb_src_width,
dout_wb.wb_dst_width,
pipes[pipe_cnt].pipe.dest.htotal,
dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
if (writeback_dispclk > max_calc_writeback_dispclk) {
max_calc_writeback_dispclk = writeback_dispclk;
pipes[pipe_cnt].dout.wb = dout_wb;
}
}
}
pipe_cnt++;
}
}
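/*
 * Derive the MCIF arbitration watermarks for writeback from the DML
 * results; the " * 1000" factor presumably converts from microseconds to
 * nanoseconds.
 */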
void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int cur_pipe)
{
int i;
dc_assert_fp_enabled();
for (i = 0; i < ARRAY_SIZE(wb_arb_params->cli_watermark); i++) {
wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
}
wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
}
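/*
 * Override the SoC latency parameters with the WM_A table entry when it is
 * valid; the p-state latency is only overridden when FW-based mclk
 * switching is not in use (or the current latency is unset).
 */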
void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
dc_assert_fp_enabled();
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
}
}
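/*
 * Compute the watermark sets (B and C here; set D is currently commented
 * out, and set A is copied from C when p-state switching is unsupported)
 * and prepare the clock config used for the per-pipe DLG/TTU calculation.
 * If a natural p-state switch is not possible, attempt FW-based vblank
 * stretch and re-validate with the dummy p-state latency.
 */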
void dcn30_fpu_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
int i, pipe_idx;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
dc_assert_fp_enabled();
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i])
context->streams[i]->fpo_in_use = false;
}
if (!pstate_en) {
/* only when the mclk switch can not be natural, is the fw based vblank stretch attempted */
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
* we reinstate the original dram_clock_change_latency_us on the context
* and all variables that may have changed up to this point, except the
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
}
}
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
/* Set B:
* DCFCLK: 1GHz or min required above 1GHz
* FCLK/UCLK: Max
*/
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
if (vlevel == 0) {
pipes[0].clks_cfg.voltage = 1;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
/* Set D:
* DCFCLK: Min Required
* FCLK(proportional to UCLK): 1GHz or Max
* MALL stutter, sr_enter_exit = 4, sr_exit = 2us
*/
/*
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
*/
/* Set C:
* DCFCLK: Min Required
* FCLK(proportional to UCLK): 1GHz or Max
* pstate latency overridden to 5us
*/
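/* Set C uses a "dummy" p-state latency instead of the real WM_C latency.
* min_dram_speed_mts starts from the DRAM speed selected by DML
* (vba.DRAMSpeed); if p-state change is unsupported at this vlevel, the
* highest memclk entry in the clock table is assumed instead. When
* firmware-based mclk switching is not in use, dummy_latency_index is then
* chosen as the largest dummy_pstate_table entry whose dram_speed_mts lies
* below that speed plus a small margin; the table itself is filled in
* dcn3_fpu_build_wm_range_table() further down in this file.
*/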
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
unsigned int min_dram_speed_mts_margin = 160;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
dm_dram_clock_change_unsupported) {
int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
min_dram_speed_mts =
dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
}
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
/* find the largest dummy p-state table entry whose DRAM speed is
* below the current one; anything below DPM0 still uses the DPM0 entry
*/
for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
if (min_dram_speed_mts + min_dram_speed_mts_margin >
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
break;
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if (!pstate_en) {
/* The only difference between A and C is p-state latency. If p-state is not
* supported, calculate DLG based on the dummy p-state latency and max out the
* Set A p-state watermark.
*/
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
} else {
/* Set A:
* DCFCLK: Min Required
* FCLK(proportional to UCLK): 1GHz or Max
*
* Set A calculated last so that following calculations are based on Set A
*/
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
/* Make set D = set A until set D is enabled */
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_idx++;
}
// WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
dc->dml.soc.num_chans <= 4 &&
context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
for (i = 0; i < dc->dml.soc.num_states; i++) {
if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
break;
}
}
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
/* Restore full p-state latency */
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
}
void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
{
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
{
dc_assert_fp_enabled();
if (!dcn30_bb_max_clk->max_dcfclk_mhz)
dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
if (!dcn30_bb_max_clk->max_dispclk_mhz)
dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
if (!dcn30_bb_max_clk->max_dppclk_mhz)
dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
if (!dcn30_bb_max_clk->max_phyclk_mhz)
dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
}
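/* The helper below derives the DCFCLK/FCLK operating point matching a given
* UCLK: the derated DRAM bandwidth is
* uclk_mts * num_chans * dram_channel_width_bytes * min(dram%, sdp%) / 100
* (dram% = max_avg_dram_bw_use_normal_percent, sdp% = max_avg_sdp_bw_use_normal_percent),
* and the optimal clocks are those whose ideal return bandwidth equals it:
* optimal_dcfclk = bw / (return_bus_width_bytes * sdp% / 100) and
* optimal_fclk = bw / (fabric_datapath_to_dcn_data_return_bytes * sdp% / 100),
* using the dcn3_0_soc derate percentages.
*/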
void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
dc_assert_fp_enabled();
bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
struct clk_bw_params *bw_params,
struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
unsigned int *dcfclk_mhz,
unsigned int *dram_speed_mts)
{
unsigned int i;
dc_assert_fp_enabled();
dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
for (i = 0; i < dcn3_0_soc.num_states; i++) {
dcn3_0_soc.clock_limits[i].state = i;
dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
dcn3_0_soc.clock_limits[i].dppclk_mhz = dcn30_bb_max_clk->max_dppclk_mhz;
dcn3_0_soc.clock_limits[i].phyclk_mhz = dcn30_bb_max_clk->max_phyclk_mhz;
dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[0] */
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}
/**
* dcn30_find_dummy_latency_index_for_fw_based_mclk_switch() - Find the
* dummy_latency_index to use when MCLK switching using firmware-based vblank
* stretch is enabled. Iterates through the table of dummy p-state latencies
* until it finds the lowest index whose latency still allows
* dm_allow_self_refresh_and_mclk_switch.
*
* @dc: Current DC state
* @context: new dc state
* @pipes: DML pipe params
* @pipe_cnt: number of DML pipes
* @vlevel: Voltage level calculated by DML
*
* Return: lowest dummy_latency_index value
*/
int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
const int max_latency_table_entries = 4;
int dummy_latency_index = 0;
dc_assert_fp_enabled();
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
break;
dummy_latency_index++;
}
if (dummy_latency_index == max_latency_table_entries) {
ASSERT(dummy_latency_index != max_latency_table_entries);
/* If execution gets here, dummy p-states are not possible.
* This should never happen and would mean something is
* severely wrong. Reset dummy_latency_index to 3, because
* underflows are better than system crashes.
*/
dummy_latency_index = 3;
}
return dummy_latency_index;
}
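/* The index found above is used by the caller to look up
* bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us
* when overriding dram_clock_change_latency_us for the Set C watermark path
* earlier in this file.
*/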
void dcn3_fpu_build_wm_range_table(struct clk_mgr *base)
{
/* defaults */
double pstate_latency_us = base->ctx->dc->dml.soc.dram_clock_change_latency_us;
double sr_exit_time_us = base->ctx->dc->dml.soc.sr_exit_time_us;
double sr_enter_plus_exit_time_us = base->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
uint16_t min_uclk_mhz = base->bw_params->clk_table.entries[0].memclk_mhz;
dc_assert_fp_enabled();
/* Set A - Normal - default values */
base->bw_params->wm_table.nv_entries[WM_A].valid = true;
base->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
base->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
base->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
base->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
base->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = 0;
base->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
base->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
base->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set B - Performance - higher minimum clocks */
// base->bw_params->wm_table.nv_entries[WM_B].valid = true;
// base->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
// base->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
// base->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
// base->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
// base->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = TUNED VALUE;
// base->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
// base->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = TUNED VALUE;
// base->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
base->bw_params->wm_table.nv_entries[WM_C].valid = true;
base->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 0;
base->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
base->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
base->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
base->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = 0;
base->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
base->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
base->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
base->bw_params->dummy_pstate_table[0].dram_speed_mts = 1600;
base->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
base->bw_params->dummy_pstate_table[1].dram_speed_mts = 8000;
base->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
base->bw_params->dummy_pstate_table[2].dram_speed_mts = 10000;
base->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
base->bw_params->dummy_pstate_table[3].dram_speed_mts = 16000;
base->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
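/* For example, with a minimum DRAM speed of 16000 MT/s the Set C selection
* loop above lands on index 3 (5 us dummy latency), while 8000 MT/s lands on
* index 1 (9 us), since the loop picks the largest entry whose dram_speed_mts
* is below the current speed plus the margin.
*/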
/* Set D - MALL - SR enter and exit times adjusted for MALL */
base->bw_params->wm_table.nv_entries[WM_D].valid = true;
base->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
base->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
base->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
base->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
base->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
base->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
base->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
base->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
void patch_dcn30_soc_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *dcn3_0_ip)
{
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "clk_mgr.h"
#include "resource.h"
#include "dcn321_fpu.h"
#include "dcn32/dcn32_resource.h"
#include "dcn321/dcn321_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#define DCN3_2_DEFAULT_DET_SIZE 256
struct _vcs_dpi_ip_params_st dcn3_21_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_enable = 0,
.rob_buffer_size_kbytes = 128,
.det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE,
.config_return_buffer_size_in_kbytes = 1280,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.alpha_pixel_chunk_size_kbytes = 4,
.min_pixel_chunk_size_bytes = 1024,
.dcc_meta_buffer_size_bytes = 6272,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 4,
.maximum_dsc_bits_per_component = 12,
.maximum_pixels_per_line_per_dsc_unit = 6016,
.dsc422_native_support = true,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 57,
.line_buffer_size_bits = 1171920,
.max_line_buffer_lines = 32,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 8,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 47,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 28,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 125,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
.max_num_dp2p0_outputs = 2,
.max_num_dp2p0_streams = 4,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 1434.0,
.fabricclk_mhz = 2250.0,
.dispclk_mhz = 1720.0,
.dppclk_mhz = 1720.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.phyclk_d32_mhz = 313.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 573.333,
.dram_speed_mts = 16000.0,
.dtbclk_mhz = 1564.0,
},
},
.num_states = 1,
.sr_exit_time_us = 19.95,
.sr_enter_plus_exit_time_us = 24.36,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
.round_trip_ping_latency_dcfclk_cycles = 207,
.urgent_latency_pixel_data_only_us = 4,
.urgent_latency_pixel_mixed_with_vm_data_us = 4,
.urgent_latency_vm_data_only_us = 4,
.fclk_change_latency_us = 7,
.usr_retraining_latency_us = 0,
.smn_latency_us = 0,
.mall_allocated_for_dcn_mbytes = 32,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
.max_avg_dram_bw_use_normal_percent = 15.0,
.num_chans = 8,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.5,
.dram_clock_change_latency_us = 400,
.dispclk_dppclk_vco_speed_mhz = 4300.0,
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
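/* get_optimal_ntuple(): given exactly one non-zero clock in the
* (dcfclk, fabricclk, dram_speed) tuple, back-solve the other two so that the
* derated SDP, fabric and DRAM bandwidths all match:
* dcfclk * return_bus_width * sdp%/100
* == fabricclk * return_bus_width * fabric%/100
* == dram_speed * num_chans * channel_width * dram_pixel_only%/100.
*/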
static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
{
if (entry->dcfclk_mhz > 0) {
float bw_on_sdp = entry->dcfclk_mhz * dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / 100);
entry->fabricclk_mhz = bw_on_sdp / (dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_fabric_bw_after_urgent / 100));
entry->dram_speed_mts = bw_on_sdp / (dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * ((float)dcn3_21_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
} else if (entry->fabricclk_mhz > 0) {
float bw_on_fabric = entry->fabricclk_mhz * dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_fabric_bw_after_urgent / 100);
entry->dcfclk_mhz = bw_on_fabric / (dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / 100));
entry->dram_speed_mts = bw_on_fabric / (dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * ((float)dcn3_21_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
} else if (entry->dram_speed_mts > 0) {
float bw_on_dram = entry->dram_speed_mts * dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * ((float)dcn3_21_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
entry->fabricclk_mhz = bw_on_dram / (dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_fabric_bw_after_urgent / 100));
entry->dcfclk_mhz = bw_on_dram / (dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / 100));
}
}
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
{
float memory_bw_kbytes_sec;
float fabric_bw_kbytes_sec;
float sdp_bw_kbytes_sec;
float limiting_bw_kbytes_sec;
memory_bw_kbytes_sec = entry->dram_speed_mts * dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * ((float)dcn3_21_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
fabric_bw_kbytes_sec = entry->fabricclk_mhz * dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_fabric_bw_after_urgent / 100);
sdp_bw_kbytes_sec = entry->dcfclk_mhz * dcn3_21_soc.return_bus_width_bytes * ((float)dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / 100);
limiting_bw_kbytes_sec = memory_bw_kbytes_sec;
if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec)
limiting_bw_kbytes_sec = fabric_bw_kbytes_sec;
if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec)
limiting_bw_kbytes_sec = sdp_bw_kbytes_sec;
return limiting_bw_kbytes_sec;
}
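/* dcn321_insert_entry_into_table_sorted() below keeps the table ordered by
* ascending net_bw_in_kbytes_sec: it walks to the first entry with an equal
* or higher net bandwidth, shifts the tail up by one slot and stores the new
* entry there.
*/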
static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
dc_assert_fp_enabled();
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
}
for (i = *num_entries; i > index; i--)
table[i] = table[i - 1];
table[index] = *entry;
(*num_entries)++;
}
}
static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
unsigned int index)
{
int i;
if (*num_entries == 0)
return;
for (i = index; i < *num_entries - 1; i++) {
table[i] = table[i + 1];
}
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
struct _vcs_dpi_voltage_scaling_st *second_entry)
{
struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
*first_entry = *second_entry;
*second_entry = temp_entry;
}
/*
* sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
*/
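/* Implementation note: each run of entries that share the same
* net_bw_in_kbytes_sec is bubble-sorted in place by ascending dcfclk_mhz.
*/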
static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
unsigned int start_index = 0;
unsigned int end_index = 0;
unsigned int current_bw = 0;
for (int i = 0; i < (*num_entries - 1); i++) {
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
current_bw = table[i].net_bw_in_kbytes_sec;
start_index = i;
end_index = ++i;
while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
end_index = ++i;
}
if (start_index != end_index) {
for (int j = start_index; j < end_index; j++) {
for (int k = start_index; k < end_index; k++) {
if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
swap_table_entries(&table[k], &table[k+1]);
}
}
}
start_index = 0;
end_index = 0;
}
}
/*
* remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
* and remove entries that do not follow this order
*/
static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
for (int i = 0; i < (*num_entries - 1); i++) {
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
(table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
remove_entry_from_table_at_index(table, num_entries, i);
}
}
}
/*
* override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
* Input:
* max_clk_limit - struct containing the desired clock timings
* Output:
* curr_clk_limit - struct containing the timings that need to be overwritten
* Return: 0 upon success, non-zero for failure
*/
static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
struct clk_limit_table_entry *curr_clk_limit)
{
if (NULL == max_clk_limit || NULL == curr_clk_limit)
return -1; //invalid parameters
//only overwrite if desired max clock frequency is initialized
if (max_clk_limit->dcfclk_mhz != 0)
curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
if (max_clk_limit->fclk_mhz != 0)
curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
if (max_clk_limit->memclk_mhz != 0)
curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
if (max_clk_limit->socclk_mhz != 0)
curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
if (max_clk_limit->dtbclk_mhz != 0)
curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
if (max_clk_limit->dispclk_mhz != 0)
curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
return 0;
}
static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
int i, j;
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
unsigned int num_uclk_dpms = 0;
unsigned int num_fclk_dpms = 0;
unsigned int num_dcfclk_dpms = 0;
unsigned int num_dc_uclk_dpms = 0;
unsigned int num_dc_fclk_dpms = 0;
unsigned int num_dc_dcfclk_dpms = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
num_uclk_dpms++;
if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
num_dc_uclk_dpms++;
}
if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
num_fclk_dpms++;
if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
num_dc_fclk_dpms++;
}
if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
num_dcfclk_dpms++;
if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
num_dc_dcfclk_dpms++;
}
}
if (!disable_dc_mode_overwrite) {
//Overwrite max frequencies with max DC mode frequencies for DC mode systems
override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
num_uclk_dpms = num_dc_uclk_dpms;
num_fclk_dpms = num_dc_fclk_dpms;
num_dcfclk_dpms = num_dc_dcfclk_dpms;
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
}
if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
return -1;
if (max_clk_data.dppclk_mhz == 0)
max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
if (max_clk_data.fclk_mhz == 0)
max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
dcn3_21_soc.pct_ideal_sdp_bw_after_urgent /
dcn3_21_soc.pct_ideal_fabric_bw_after_urgent;
if (max_clk_data.phyclk_mhz == 0)
max_clk_data.phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
*num_entries = 0;
entry.dispclk_mhz = max_clk_data.dispclk_mhz;
entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
entry.dppclk_mhz = max_clk_data.dppclk_mhz;
entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
entry.phyclk_mhz = max_clk_data.phyclk_mhz;
entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
// Insert all the DCFCLK STAs
for (i = 0; i < num_dcfclk_stas; i++) {
entry.dcfclk_mhz = dcfclk_sta_targets[i];
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
// Insert the max DCFCLK
entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
for (i = 0; i < num_uclk_dpms; i++) {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
// If FCLK is coarse grained, insert individual DPMs.
if (num_fclk_dpms > 2) {
for (i = 0; i < num_fclk_dpms; i++) {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
// If FCLK is fine grained, only insert the max
else {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
// At this point, the table contains all "points of interest" based on
// DPMs from PMFW and STAs. The table is sorted by BW, and all clock
// ratios (by derate) are exact.
// Remove states that require higher clocks than are supported
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
remove_entry_from_table_at_index(table, num_entries, i);
}
// Insert an entry with all max DC limits, without bandwidth matching
if (!disable_dc_mode_overwrite) {
struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
sort_entries_with_same_bw(table, num_entries);
remove_inconsistent_entries(table, num_entries);
}
// At this point, the table only contains supported points of interest;
// it could be used as is, but some states may be redundant due to the
// coarse grained nature of some clocks, so we want to round up to
// coarse grained DPMs and remove duplicates.
// Round up UCLKs
for (i = *num_entries - 1; i >= 0 ; i--) {
for (j = 0; j < num_uclk_dpms; j++) {
if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
// If FCLK is coarse grained, round up to next DPMs
if (num_fclk_dpms > 2) {
for (i = *num_entries - 1; i >= 0 ; i--) {
for (j = 0; j < num_fclk_dpms; j++) {
if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
break;
}
}
}
}
// Otherwise, round up to minimum.
else {
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
}
}
}
// Round DCFCLKs up to minimum
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
}
}
// Remove duplicate states; duplicates are always neighbouring since the table is sorted.
i = 0;
while (i < *num_entries - 1) {
if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
remove_entry_from_table_at_index(table, num_entries, i + 1);
else
i++;
}
// Fix up the state indices
for (i = *num_entries - 1; i >= 0 ; i--) {
table[i].state = i;
}
return 0;
}
static void dcn321_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * (dcn3_21_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_21_soc.num_chans *
dcn3_21_soc.dram_channel_width_bytes * (dcn3_21_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_21_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_21_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_21_soc.return_bus_width_bytes * (dcn3_21_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
/** dcn321_update_bw_bounding_box_fpu
* Overrides some dcn3_21 ip/soc initial parameters hardcoded from the spreadsheet
* with actual values for the given dGPU SKU:
* - a few options passed in dc->config
* - dentist_vco_frequency from Clk Mgr (currently hardcoded, but might need to come from PM FW)
* - latency values (passed in ns units) from dc->bb_overrides, for debugging purposes
* - latencies from VBIOS (in 100 ns units), if available for the dGPU SKU
* - the number of DRAM channels from VBIOS (which differs between dGPU SKUs of the same ASIC)
* - clock levels from the clk_table entries passed by Clk Mgr, as reported by PM FW
*   (which may differ between dGPU SKUs of the same ASIC)
*/
void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
dc_assert_fp_enabled();
/* Overrides from dc->config options */
dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
/* Override from passed dc->bb_overrides if available*/
if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}
if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
dcn3_21_soc.sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
!= dc->bb_overrides.dram_clock_change_latency_ns
&& dc->bb_overrides.dram_clock_change_latency_ns) {
dcn3_21_soc.dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
!= dc->bb_overrides.fclk_clock_change_latency_ns
&& dc->bb_overrides.fclk_clock_change_latency_ns) {
dcn3_21_soc.fclk_change_latency_us =
dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
}
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
dcn3_21_soc.dummy_pstate_latency_us =
dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
}
/* Override from VBIOS if VBIOS bb_info available */
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_21_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_21_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_21_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}
}
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
/* DML DSC delay factor workaround */
dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
dcn3_21_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
/* Override clock levels from Clk Mgr table entries as reported by PM FW */
if (dc->debug.use_legacy_soc_bb_mechanism) {
unsigned int i = 0, j = 0, num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564};
unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
// Update size of array since we "removed" duplicates
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
}
// Calculate optimal uclk for each dcfclk sta target
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_21_soc.num_states = num_states;
for (i = 0; i < dcn3_21_soc.num_states; i++) {
dcn3_21_soc.clock_limits[i].state = i;
dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
/* Fill all states with max values of all these clocks */
dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (i > 0) {
if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz;
} else {
dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
} else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
if (!dram_speed_mts[i] && i > 0)
dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts;
else
dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */
/* PHYCLK_D18, PHYCLK_D32 */
dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
}
} else {
build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states);
}
/* Re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn_calc_auto.h"
#include "dcn_calc_math.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
/*REVISION#250*/
void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
{
int k;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->allow_different_hratio_vratio == dcn_bw_yes) {
if (v->source_scan[k] == dcn_bw_hor) {
v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k];
v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k];
}
else {
v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k];
v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k];
}
}
else {
if (v->source_scan[k] == dcn_bw_hor) {
v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]);
}
else {
v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]);
}
v->v_ratio[k] = v->h_ratio[k];
}
if (v->interlace_output[k] == 1.0) {
v->v_ratio[k] = 2.0 * v->v_ratio[k];
}
if (v->underscan_output[k] == 1.0) {
v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
}
}
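/* h_ratio/v_ratio above are source-over-destination scale ratios: the
* viewport dimension (swapped for vertically scanned sources) divided by the
* scaler recout width/height. v_ratio is doubled for interlaced output, and
* both ratios are multiplied by under_scan_factor when underscan is enabled.
*/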
/*scaler taps calculation*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->h_ratio[k] > 1.0) {
v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0));
}
else if (v->h_ratio[k] < 1.0) {
v->acceptable_quality_hta_ps = 4.0;
}
else {
v->acceptable_quality_hta_ps = 1.0;
}
if (v->ta_pscalculation == dcn_bw_override) {
v->htaps[k] = v->override_hta_ps[k];
}
else {
v->htaps[k] = v->acceptable_quality_hta_ps;
}
if (v->v_ratio[k] > 1.0) {
v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0));
}
else if (v->v_ratio[k] < 1.0) {
v->acceptable_quality_vta_ps = 4.0;
}
else {
v->acceptable_quality_vta_ps = 1.0;
}
if (v->ta_pscalculation == dcn_bw_override) {
v->vtaps[k] = v->override_vta_ps[k];
}
else {
v->vtaps[k] = v->acceptable_quality_vta_ps;
}
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->vta_pschroma[k] = 0.0;
v->hta_pschroma[k] = 0.0;
}
else {
if (v->ta_pscalculation == dcn_bw_override) {
v->vta_pschroma[k] = v->override_vta_pschroma[k];
v->hta_pschroma[k] = v->override_hta_pschroma[k];
}
else {
v->vta_pschroma[k] = v->acceptable_quality_vta_ps;
v->hta_pschroma[k] = v->acceptable_quality_hta_ps;
}
}
}
}
void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
{
int i;
int j;
int k;
/*mode support, voltage state and soc configuration*/
/*scale ratio support check*/
v->scale_ratio_support = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) {
v->scale_ratio_support = dcn_bw_no;
}
}
/*source format, pixel format and scan support check*/
v->source_format_pixel_and_scan_support = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) {
v->source_format_pixel_and_scan_support = dcn_bw_no;
}
}
/*bandwidth support check*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_scan[k] == dcn_bw_hor) {
v->swath_width_ysingle_dpp[k] = v->viewport_width[k];
}
else {
v->swath_width_ysingle_dpp[k] = v->viewport_height[k];
}
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->byte_per_pixel_in_dety[k] = 8.0;
v->byte_per_pixel_in_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
v->byte_per_pixel_in_dety[k] = 4.0;
v->byte_per_pixel_in_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->byte_per_pixel_in_dety[k] = 2.0;
v->byte_per_pixel_in_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->byte_per_pixel_in_dety[k] = 1.0;
v->byte_per_pixel_in_detc[k] = 2.0;
}
else {
v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f;
v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f;
}
}
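/* DET bytes per pixel by source format: 8/4/2 for 64/32/16 bpp RGB (no chroma
* plane), 1 byte luma plus 2 bytes chroma for 8-bit 4:2:0, and 4/3 luma plus
* 8/3 chroma for the remaining (10-bit 4:2:0) case.
*/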
v->total_read_bandwidth_consumed_gbyte_per_second = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]);
if (v->dcc_enable[k] == dcn_bw_yes) {
v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
}
if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) {
v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64);
}
else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) {
v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
}
else if (v->pte_enable == dcn_bw_yes) {
v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512);
}
v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0;
}
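/* read_bandwidth above is swath width times bytes per pixel (luma at the full
* v_ratio, chroma at half the byte count and half the v_ratio) divided by the
* line time htotal/pixel_clock; the per-plane results are accumulated into
* total_read_bandwidth_consumed_gbyte_per_second.
*/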
v->total_write_bandwidth_consumed_gbyte_per_second = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
}
else if (v->output[k] == dcn_bw_writeback) {
v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
}
else {
v->write_bandwidth[k] = 0.0;
}
v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0;
}
v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second;
v->dcc_enabled_in_any_plane = dcn_bw_no;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->dcc_enabled_in_any_plane = dcn_bw_yes;
}
}
for (i = 0; i <= number_of_states_plus_one; i++) {
v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
v->return_bw_per_state[i] = v->return_bw_todcn_per_state;
if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
}
v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
}
v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0);
if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
}
v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
}
}
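	/*bandwidth support: total read demand must fit within the state's return bandwidth and total read+write demand within the dram bandwidth available after urgent latency*/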
for (i = 0; i <= number_of_states_plus_one; i++) {
if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) {
v->bandwidth_support[i] = dcn_bw_yes;
}
else {
v->bandwidth_support[i] = dcn_bw_no;
}
}
/*writeback latency support check*/
v->writeback_latency_support = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
v->writeback_latency_support = dcn_bw_no;
}
else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
v->writeback_latency_support = dcn_bw_no;
}
}
/*re-ordering buffer support check*/
for (i = 0; i <= number_of_states_plus_one; i++) {
v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i];
if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) {
v->rob_support[i] = dcn_bw_yes;
}
else {
v->rob_support[i] = dcn_bw_no;
}
}
/*display io support check*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) {
if (v->output_format[k] == dcn_bw_420) {
v->required_output_bw = v->pixel_clock[k] / 2.0;
}
else {
v->required_output_bw = v->pixel_clock[k];
}
}
else if (v->output_format[k] == dcn_bw_420) {
v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0;
}
else {
v->required_output_bw = v->pixel_clock[k] * 3.0;
}
if (v->output[k] == dcn_bw_hdmi) {
v->required_phyclk[k] = v->required_output_bw;
switch (v->output_deep_color[k]) {
case dcn_bw_encoder_10bpc:
v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4;
break;
case dcn_bw_encoder_12bpc:
v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2;
break;
default:
break;
}
v->required_phyclk[k] = v->required_phyclk[k] / 3.0;
}
else if (v->output[k] == dcn_bw_dp) {
v->required_phyclk[k] = v->required_output_bw / 4.0;
}
else {
v->required_phyclk[k] = 0.0;
}
}
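	/*check required phyclk against each state's maximum; hdmi is additionally capped at 600.0, presumably the tmds character rate limit in mhz*/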
for (i = 0; i <= number_of_states_plus_one; i++) {
v->dio_support[i] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) {
v->dio_support[i] = dcn_bw_no;
}
}
}
/*total available writeback support check*/
v->total_number_of_active_writeback = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_writeback) {
v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0;
}
}
if (v->total_number_of_active_writeback <= v->max_num_writeback) {
v->total_available_writeback_support = dcn_bw_yes;
}
else {
v->total_available_writeback_support = dcn_bw_no;
}
/*maximum dispclk/dppclk support check*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->h_ratio[k] > 1.0) {
v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
}
else {
v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
}
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->pscl_factor_chroma[k] = 0.0;
v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0);
}
else {
if (v->h_ratio[k] / 2.0 > 1.0) {
v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
}
else {
v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
}
v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0);
}
}
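	/*derive 256-byte request block dimensions and the resulting min/max swath heights per plane from pixel format, swizzle mode and scan direction, then the dpp count needed to fit the det and line buffer*/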
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->read256_block_height_y[k] = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->read256_block_height_y[k] = 4.0;
}
else {
v->read256_block_height_y[k] = 8.0;
}
v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
v->read256_block_height_c[k] = 0.0;
v->read256_block_width_c[k] = 0.0;
}
else {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->read256_block_height_y[k] = 1.0;
v->read256_block_height_c[k] = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->read256_block_height_y[k] = 16.0;
v->read256_block_height_c[k] = 8.0;
}
else {
v->read256_block_height_y[k] = 8.0;
v->read256_block_height_c[k] = 8.0;
}
v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k];
}
if (v->source_scan[k] == dcn_bw_hor) {
v->max_swath_height_y[k] = v->read256_block_height_y[k];
v->max_swath_height_c[k] = v->read256_block_height_c[k];
}
else {
v->max_swath_height_y[k] = v->read256_block_width_y[k];
v->max_swath_height_c[k] = v->read256_block_width_c[k];
}
if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
v->min_swath_height_y[k] = v->max_swath_height_y[k];
}
else {
v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
}
v->min_swath_height_c[k] = v->max_swath_height_c[k];
}
else {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->min_swath_height_y[k] = v->max_swath_height_y[k];
v->min_swath_height_c[k] = v->max_swath_height_c[k];
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
v->min_swath_height_c[k] = v->max_swath_height_c[k];
}
else {
v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
}
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
v->min_swath_height_y[k] = v->max_swath_height_y[k];
}
else {
v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
}
}
else {
v->min_swath_height_y[k] = v->max_swath_height_y[k];
v->min_swath_height_c[k] = v->max_swath_height_c[k];
}
}
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->maximum_swath_width = 8192.0;
}
else {
v->maximum_swath_width = 5120.0;
}
v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0);
}
else {
v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0));
}
v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size);
}
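	/*for each state and dispclk:dppclk ratio, choose 1 or 2 dpp per plane and accumulate the required dispclk; if the dpp budget is exceeded, redo the split from the det/lb requirement alone and let the clock check fail instead*/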
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->total_number_of_active_dpp[i][j] = 0.0;
v->required_dispclk[i][j] = 0.0;
v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
if (v->odm_capability == dcn_bw_yes) {
v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
}
else {
v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
}
if (i < number_of_states) {
v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
}
if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
v->no_of_dpp[i][j][k] = 1.0;
v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
}
else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
v->no_of_dpp[i][j][k] = 2.0;
v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
}
else {
v->no_of_dpp[i][j][k] = 2.0;
v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
v->dispclk_dppclk_support[i][j] = dcn_bw_no;
}
v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
}
if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) {
v->total_number_of_active_dpp[i][j] = 0.0;
v->required_dispclk[i][j] = 0.0;
v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
if (i < number_of_states) {
v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
}
if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
v->no_of_dpp[i][j][k] = 1.0;
v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
v->dispclk_dppclk_support[i][j] = dcn_bw_no;
}
}
else {
v->no_of_dpp[i][j][k] = 2.0;
v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
v->dispclk_dppclk_support[i][j] = dcn_bw_no;
}
}
v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
}
}
}
}
/*viewport size check*/
v->viewport_size_support = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) {
v->viewport_size_support = dcn_bw_no;
}
}
/*total available pipes support check*/
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
if (v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) {
v->total_available_pipes_support[i][j] = dcn_bw_yes;
}
else {
v->total_available_pipes_support[i][j] = dcn_bw_no;
}
}
}
/*urgent latency support check*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k];
v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k];
v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k];
if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
}
if (v->max_swath_height_c[k] > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
}
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k];
}
else {
v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k];
v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k];
}
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
v->lines_in_det_chroma = 0.0;
}
else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) {
v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
}
else {
v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
}
v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
}
else {
v->effective_lb_latency_hiding_source_lines_chroma = dcn_bw_min2(v->max_line_buffer_lines, dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 / dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
v->effective_detlb_lines_chroma = dcn_bw_floor2(v->lines_in_det_chroma + dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
v->urgent_latency_support_us_per_state[i][j][k] = dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] * dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 * dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
}
}
}
}
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->urgent_latency_support[i][j] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) {
v->urgent_latency_support[i][j] = dcn_bw_no;
}
}
}
}
/*prefetch check*/
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->total_number_of_dcc_active_dpp[i][j] = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k];
}
}
}
}
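	/*projected dcfclk deep sleep: at least 8 and pixel_clock/16, raised by per-plane luma and chroma request rates*/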
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->projected_dcfclk_deep_sleep = 8.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
if (v->v_ratio[k] <= 1.0) {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
}
else {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
}
}
else {
if (v->v_ratio[k] <= 1.0) {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
}
else {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
}
if (v->v_ratio[k] / 2.0 <= 1.0) {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
}
else {
v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j));
}
}
}
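			/*per-plane dcc meta surface and pte row sizing, prefetch line counts, and the prefetch schedule; vstartup is walked down until the meta pte and meta/dpte row fetches fit in few enough lines*/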
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->meta_req_height_y = 8.0 * v->read256_block_height_y[k];
v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y;
v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y;
v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y;
if (v->pte_enable == dcn_bw_yes) {
v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
}
else {
v->meta_pte_bytes_per_frame_y = 0.0;
}
if (v->source_scan[k] == dcn_bw_hor) {
v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
}
else {
v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
}
}
else {
v->meta_pte_bytes_per_frame_y = 0.0;
v->meta_row_bytes_y = 0.0;
}
if (v->pte_enable == dcn_bw_yes) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->macro_tile_block_size_bytes_y = 256.0;
v->macro_tile_block_height_y = 1.0;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
v->macro_tile_block_size_bytes_y = 4096.0;
v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k];
}
else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
v->macro_tile_block_size_bytes_y = 64.0 * 1024;
v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k];
}
else {
v->macro_tile_block_size_bytes_y = 256.0 * 1024;
v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k];
}
if (v->macro_tile_block_size_bytes_y <= 65536.0) {
v->data_pte_req_height_y = v->macro_tile_block_height_y;
}
else {
v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k];
}
v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8;
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
}
else if (v->source_scan[k] == dcn_bw_hor) {
v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
}
else {
v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) + 1);
}
}
else {
v->dpte_bytes_per_row_y = 0.0;
}
if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->meta_req_height_c = 8.0 * v->read256_block_height_c[k];
v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c;
v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c;
v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c;
if (v->pte_enable == dcn_bw_yes) {
v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
}
else {
v->meta_pte_bytes_per_frame_c = 0.0;
}
if (v->source_scan[k] == dcn_bw_hor) {
v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
}
else {
v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
}
}
else {
v->meta_pte_bytes_per_frame_c = 0.0;
v->meta_row_bytes_c = 0.0;
}
if (v->pte_enable == dcn_bw_yes) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->macro_tile_block_size_bytes_c = 256.0;
v->macro_tile_block_height_c = 1.0;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
v->macro_tile_block_size_bytes_c = 4096.0;
v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k];
}
else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
v->macro_tile_block_size_bytes_c = 64.0 * 1024;
v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k];
}
else {
v->macro_tile_block_size_bytes_c = 256.0 * 1024;
v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k];
}
v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c;
if (v->macro_tile_block_size_bytes_c <= 65536.0) {
v->data_pte_req_height_c = v->macro_tile_block_height_c;
}
else {
v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k];
}
v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8;
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
}
else if (v->source_scan[k] == dcn_bw_hor) {
v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
}
else {
v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1);
}
}
else {
v->dpte_bytes_per_row_c = 0.0;
}
}
else {
v->dpte_bytes_per_row_c = 0.0;
v->meta_pte_bytes_per_frame_c = 0.0;
v->meta_row_bytes_c = 0.0;
}
v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c;
v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c;
v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c;
v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0;
v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0);
v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1;
if (v->prefill_y[k] > 1.0) {
v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]);
}
else {
v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]);
}
v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y);
v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y;
if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0;
v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0);
v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1;
if (v->prefill_c[k] > 1.0) {
v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]);
}
else {
v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]);
}
v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c);
v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c;
}
else {
v->prefetch_lines_c[k] = 0.0;
}
v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j];
if (v->no_of_dpp[i][j][k] > 1.0) {
v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0;
}
if (v->output_format[k] == dcn_bw_420) {
v->dst_y_after_scaler = 1.0;
}
else {
v->dst_y_after_scaler = 0.0;
}
v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k];
v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
if (v->pte_enable == dcn_bw_yes) {
v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
}
if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0;
}
else {
v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
}
do {
v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
						v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3(
							v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k],
							v->extra_latency,
							v->htotal[k] / v->pixel_clock[k] / 4.0);
} else {
v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
}
if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3((
v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k],
v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
v->extra_latency);
} else {
						v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2(
							v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
							v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
}
v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->maximum_vstartup = v->maximum_vstartup - 1;
if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
break;
				} while (1);
}
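			/*bandwidth left over for immediate flips after active reads and prefetch*/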
v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]);
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->total_immediate_flip_bytes[k] = 0.0;
if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k];
}
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
					v->time_for_meta_pte_with_immediate_flip = dcn_bw_max5(
						v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k],
						v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])),
						v->extra_latency,
						v->urgent_latency,
						v->htotal[k] / v->pixel_clock[k] / 4.0);
}
else {
v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
}
if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
					v->time_for_meta_and_dpte_row_with_immediate_flip = dcn_bw_max5(
						(v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k],
						(v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])),
						v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip,
						v->extra_latency,
						2.0 * v->urgent_latency);
}
else {
v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
}
v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
}
else {
v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
}
}
v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
}
else {
v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
}
}
v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
}
else {
v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0;
}
if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) {
v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
}
else {
v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
}
}
v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
}
else {
v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
}
}
v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
}
else {
v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0;
}
}
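			/*total read bandwidth during prefetch, with and without immediate flip, checked against the state's return bandwidth along with a sane prefetch schedule*/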
v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]));
}
else {
v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
}
}
v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
}
v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) {
v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) {
v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
}
}
v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) {
v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) {
v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
}
}
}
}
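	/*prefetch v-ratio check: the effective prefetch v-ratio must stay at or below 4 (yuv planes are evaluated on the no-flip schedule)*/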
for (i = 0; i <= number_of_states_plus_one; i++) {
for (j = 0; j <= 1; j++) {
v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) {
v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
}
}
v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) {
v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
}
}
}
}
/*mode support, voltage state and soc configuration*/
for (i = number_of_states_plus_one; i >= 0; i--) {
for (j = 0; j <= 1; j++) {
if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) {
if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) {
v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes;
}
else {
v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
}
if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) {
v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes;
}
else {
v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
}
}
else {
v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
}
}
}
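	/*pick the lowest voltage state at or above the override level that supports the mode, with and without immediate flip*/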
for (i = number_of_states_plus_one; i >= 0; i--) {
if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
v->voltage_level_with_immediate_flip = i;
}
}
for (i = number_of_states_plus_one; i >= 0; i--) {
if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
v->voltage_level_without_immediate_flip = i;
}
}
if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) {
v->immediate_flip_supported = dcn_bw_no;
v->voltage_level = v->voltage_level_without_immediate_flip;
}
else {
v->immediate_flip_supported = dcn_bw_yes;
v->voltage_level = v->voltage_level_with_immediate_flip;
}
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
for (j = 0; j <= 1; j++) {
v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j];
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k];
}
v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j];
}
v->max_phyclk = v->phyclk_per_state[v->voltage_level];
}
void display_pipe_configuration(struct dcn_bw_internal_vars *v)
{
int j;
int k;
/*display pipe configuration*/
for (j = 0; j <= 1; j++) {
v->total_number_of_active_dpp_per_ratio[j] = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k];
}
}
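	/*prefer a 1:1 dispclk:dppclk ratio when only it is supported, or when it needs fewer pipes than 2:1 (or the same pipes at no more than half the required dispclk)*/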
if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) {
v->dispclk_dppclk_ratio = 1;
v->final_error_message = v->error_message[0];
}
else {
v->dispclk_dppclk_ratio = 2;
v->final_error_message = v->error_message[1];
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k];
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->byte_per_pix_dety = 8.0;
v->byte_per_pix_detc = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
v->byte_per_pix_dety = 4.0;
v->byte_per_pix_detc = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->byte_per_pix_dety = 2.0;
v->byte_per_pix_detc = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->byte_per_pix_dety = 1.0;
v->byte_per_pix_detc = 2.0;
}
else {
v->byte_per_pix_dety = 4.0f / 3.0f;
v->byte_per_pix_detc = 8.0f / 3.0f;
}
if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->read256_bytes_block_height_y = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->read256_bytes_block_height_y = 4.0;
}
else {
v->read256_bytes_block_height_y = 8.0;
}
v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
v->read256_bytes_block_height_c = 0.0;
v->read256_bytes_block_width_c = 0.0;
}
else {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->read256_bytes_block_height_y = 1.0;
v->read256_bytes_block_height_c = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->read256_bytes_block_height_y = 16.0;
v->read256_bytes_block_height_c = 8.0;
}
else {
v->read256_bytes_block_height_y = 8.0;
v->read256_bytes_block_height_c = 8.0;
}
v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c;
}
if (v->source_scan[k] == dcn_bw_hor) {
v->maximum_swath_height_y = v->read256_bytes_block_height_y;
v->maximum_swath_height_c = v->read256_bytes_block_height_c;
}
else {
v->maximum_swath_height_y = v->read256_bytes_block_width_y;
v->maximum_swath_height_c = v->read256_bytes_block_width_c;
}
if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
v->minimum_swath_height_y = v->maximum_swath_height_y;
}
else {
v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
}
v->minimum_swath_height_c = v->maximum_swath_height_c;
}
else {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->minimum_swath_height_y = v->maximum_swath_height_y;
v->minimum_swath_height_c = v->maximum_swath_height_c;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
v->minimum_swath_height_c = v->maximum_swath_height_c;
}
else {
v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
}
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
v->minimum_swath_height_y = v->maximum_swath_height_y;
}
else {
v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
}
}
else {
v->minimum_swath_height_y = v->maximum_swath_height_y;
v->minimum_swath_height_c = v->maximum_swath_height_c;
}
}
if (v->source_scan[k] == dcn_bw_hor) {
v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k];
}
else {
v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k];
}
v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y;
v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y;
if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
}
if (v->maximum_swath_height_c > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
}
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_y[k] = v->maximum_swath_height_y;
v->swath_height_c[k] = v->maximum_swath_height_c;
}
else {
v->swath_height_y[k] = v->minimum_swath_height_y;
v->swath_height_c[k] = v->minimum_swath_height_c;
}
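		/*split the det buffer between luma and chroma: all luma when there is no chroma swath, 1/2:1/2 when the luma swath is not taller, otherwise 2/3:1/3*/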
if (v->swath_height_c[k] == 0.0) {
v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0;
v->det_buffer_size_c[k] = 0.0;
}
else if (v->swath_height_y[k] <= v->swath_height_c[k]) {
v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
}
else {
v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0;
v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0;
}
}
}
void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v)
{
int k;
/*dispclk and dppclk calculation*/
v->dispclk_with_ramping = 0.0;
v->dispclk_without_ramping = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->h_ratio[k] > 1.0) {
v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
}
else {
v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
}
v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0);
if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->pscl_throughput_chroma[k] = 0.0;
v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma;
}
else {
if (v->h_ratio[k] > 1.0) {
v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
}
else {
v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
}
v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0);
v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma);
}
if (v->odm_capable == dcn_bw_yes) {
v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0));
}
else {
v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0));
}
}
if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) {
v->dispclk = v->dispclk_without_ramping;
}
else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) {
v->dispclk = v->max_dispclk[number_of_states];
}
else {
v->dispclk = v->dispclk_with_ramping;
}
v->dppclk = v->dispclk / v->dispclk_dppclk_ratio;
/*urgent watermark*/
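/*
 * The effective return bandwidth starts as the smaller of the return bus
 * throughput (return_bus_width * dcfclk) and the derated fabric/DRAM
 * bandwidth.  When any plane uses DCC, the two blocks below appear to
 * clamp it further based on how fast the ROB (less one pixel chunk) can
 * absorb a burst around the critical compression ratio.
 */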
v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
v->dcc_enabled_any_plane = dcn_bw_no;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->dcc_enabled_any_plane = dcn_bw_yes;
}
}
v->return_bw = v->return_bandwidth_to_dcn;
if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
}
v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
}
v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0);
if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
}
v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_scan[k] == dcn_bw_hor) {
v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
}
else {
v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
}
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->byte_per_pixel_dety[k] = 8.0;
v->byte_per_pixel_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
v->byte_per_pixel_dety[k] = 4.0;
v->byte_per_pixel_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->byte_per_pixel_dety[k] = 2.0;
v->byte_per_pixel_detc[k] = 0.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->byte_per_pixel_dety[k] = 1.0;
v->byte_per_pixel_detc[k] = 2.0;
}
else {
v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
}
}
v->total_data_read_bandwidth = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
}
v->total_active_dpp = 0.0;
v->total_dcc_active_dpp = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k];
if (v->dcc_enable[k] == dcn_bw_yes) {
v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k];
}
}
v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw;
v->last_pixel_of_line_extra_watermark = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->v_ratio[k] <= 1.0) {
v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
}
else {
v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
}
v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]);
if (v->byte_per_pixel_detc[k] == 0.0) {
v->display_pipe_line_delivery_time_chroma[k] = 0.0;
}
else {
if (v->v_ratio[k] / 2.0 <= 1.0) {
v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k];
}
else {
v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk;
}
v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]);
}
}
v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw;
if (v->pte_enable == dcn_bw_yes) {
v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw;
}
v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency;
/*nb p-state/dram clock change watermark*/
v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark;
v->total_active_writeback = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_writeback) {
v->total_active_writeback = v->total_active_writeback + 1.0;
}
}
if (v->total_active_writeback <= 1.0) {
v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency;
}
else {
v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk;
}
/*stutter efficiency*/
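/*
 * Stutter efficiency compares how long the DET can buffer each plane
 * (rounded down to whole swaths) against the time needed to refill it in
 * a burst, given how much of that burst fits in the ROB.  Any active
 * writeback zeroes the non-vblank portion of the result.
 */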
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k];
v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]);
v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k];
if (v->byte_per_pixel_detc[k] > 0.0) {
v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0);
v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]);
v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0);
}
else {
v->lines_in_detc[k] = 0.0;
v->lines_in_detc_rounded_down_to_swath[k] = 0.0;
v->full_det_buffering_time_c[k] = 999999.0;
}
}
v->min_full_det_buffering_time = 999999.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) {
v->min_full_det_buffering_time = v->full_det_buffering_time_y[k];
v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
}
if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) {
v->min_full_det_buffering_time = v->full_det_buffering_time_c[k];
v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
}
}
v->average_read_bandwidth_gbyte_per_second = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0;
}
else {
v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0;
}
if (v->dcc_enable[k] == dcn_bw_yes) {
v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0;
}
if (v->pte_enable == dcn_bw_yes) {
v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0;
}
}
v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0));
v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0);
if (v->total_active_writeback == 0.0) {
v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0;
}
else {
v->stutter_efficiency_not_including_vblank = 0.0;
}
v->smallest_vblank = 999999.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k];
}
else {
v->v_blank_time = 0.0;
}
v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time);
}
v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0;
/*dcfclk deep sleep*/
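/*
 * Deep-sleep dcfclk per plane: keep up with the swath fill rate (the 1.1
 * factor presumably adds ~10% margin) and never drop below
 * pixel_clock / 16.
 */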
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->byte_per_pixel_detc[k] > 0.0) {
v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]);
}
else {
v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k];
}
v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0);
}
v->dcf_clk_deep_sleep = 8.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]);
}
/*stutter watermark*/
v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / v->dcf_clk_deep_sleep;
v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
/*urgent latency supported*/
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]);
v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]);
if (v->byte_per_pixel_detc[k] > 0.0) {
v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]);
v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]);
v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma);
}
else {
v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma;
}
}
v->min_urgent_latency_support_us = 999999.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]);
}
/*non-urgent latency tolerance*/
v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark;
/*prefetch*/
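/*
 * Prefetch sizing: per plane, derive the 256-byte block heights, DCC meta
 * surface/row sizes and PTE row sizes from the surface format, swizzle
 * mode and scan direction, then the number of source lines that must be
 * prefetched ahead of the first active line.
 */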
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->block_height256_bytes_y = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->block_height256_bytes_y = 4.0;
}
else {
v->block_height256_bytes_y = 8.0;
}
v->block_height256_bytes_c = 0.0;
}
else {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->block_height256_bytes_y = 1.0;
v->block_height256_bytes_c = 1.0;
}
else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->block_height256_bytes_y = 16.0;
v->block_height256_bytes_c = 8.0;
}
else {
v->block_height256_bytes_y = 8.0;
v->block_height256_bytes_c = 8.0;
}
}
if (v->dcc_enable[k] == dcn_bw_yes) {
v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y);
v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y;
v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y;
if (v->pte_enable == dcn_bw_yes) {
v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
}
else {
v->meta_pte_bytes_frame_y = 0.0;
}
if (v->source_scan[k] == dcn_bw_hor) {
v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
}
else {
v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
}
}
else {
v->meta_pte_bytes_frame_y = 0.0;
v->meta_row_byte_y = 0.0;
}
if (v->pte_enable == dcn_bw_yes) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->macro_tile_size_byte_y = 256.0;
v->macro_tile_height_y = 1.0;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
v->macro_tile_size_byte_y = 4096.0;
v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
v->macro_tile_size_byte_y = 64.0 * 1024;
v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y;
}
else {
v->macro_tile_size_byte_y = 256.0 * 1024;
v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y;
}
if (v->macro_tile_size_byte_y <= 65536.0) {
v->pixel_pte_req_height_y = v->macro_tile_height_y;
}
else {
v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y;
}
v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8;
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
}
else if (v->source_scan[k] == dcn_bw_hor) {
v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
}
else {
v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1);
}
}
else {
v->pixel_pte_bytes_per_row_y = 0.0;
}
if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
if (v->dcc_enable[k] == dcn_bw_yes) {
v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c);
v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c;
v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c;
if (v->pte_enable == dcn_bw_yes) {
v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
}
else {
v->meta_pte_bytes_frame_c = 0.0;
}
if (v->source_scan[k] == dcn_bw_hor) {
v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
}
else {
v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
}
}
else {
v->meta_pte_bytes_frame_c = 0.0;
v->meta_row_byte_c = 0.0;
}
if (v->pte_enable == dcn_bw_yes) {
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->macro_tile_size_bytes_c = 256.0;
v->macro_tile_height_c = 1.0;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
v->macro_tile_size_bytes_c = 4096.0;
v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c;
}
else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
v->macro_tile_size_bytes_c = 64.0 * 1024;
v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c;
}
else {
v->macro_tile_size_bytes_c = 256.0 * 1024;
v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c;
}
if (v->macro_tile_size_bytes_c <= 65536.0) {
v->pixel_pte_req_height_c = v->macro_tile_height_c;
}
else {
v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c;
}
v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8;
if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
}
else if (v->source_scan[k] == dcn_bw_hor) {
v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
}
else {
v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1);
}
}
else {
v->pixel_pte_bytes_per_row_c = 0.0;
}
}
else {
v->pixel_pte_bytes_per_row_c = 0.0;
v->meta_pte_bytes_frame_c = 0.0;
v->meta_row_byte_c = 0.0;
}
v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c;
v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c;
v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c;
v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0);
v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1;
if (v->v_init_pre_fill_y[k] > 1.0) {
v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]);
}
else {
v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]);
}
v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y);
v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y;
if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0);
v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1;
if (v->v_init_pre_fill_c[k] > 1.0) {
v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]);
}
else {
v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]);
}
v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c);
}
else {
v->max_num_swath_c[k] = 0.0;
v->max_partial_swath_c = 0.0;
}
v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c;
}
v->t_calc = 24.0 / v->dcf_clk_deep_sleep;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0;
}
else {
v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0;
}
}
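/*
 * Prefetch schedule search: the outer loop steps prefetch_mode from 0 to 2
 * (hide p-state change, hide self refresh only, hide urgent only) and the
 * inner loop grows v_startup_lines from 13 until the schedule fits within
 * the return bandwidth or raising vstartup can no longer help.
 */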
v->next_prefetch_mode = 0.0;
do {
v->v_startup_lines = 13.0;
do {
v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes;
v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no;
v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
v->v_ratio_prefetch_more_than4 = dcn_bw_no;
v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
v->prefetch_mode = v->next_prefetch_mode;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk;
if (v->dpp_per_plane[k] > 1.0) {
v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0;
}
if (v->output_format[k] == dcn_bw_420) {
v->dsty_after_scaler = 1.0;
}
else {
v->dsty_after_scaler = 0.0;
}
v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k];
v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
if (v->prefetch_mode == 0.0) {
v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
}
else if (v->prefetch_mode == 1.0) {
v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency);
}
else {
v->t_wait = v->urgent_latency;
}
v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4;
if (v->destination_lines_for_prefetch[k] > 0.0) {
v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
}
else {
v->prefetch_bandwidth[k] = 999999.0;
}
}
v->bandwidth_available_for_immediate_flip = v->return_bw;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]);
}
v->tot_immediate_flip_bytes = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k];
}
}
v->max_rd_bandwidth = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
}
else {
v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
}
}
else {
v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0;
}
v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) {
if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
}
else {
v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
}
}
else {
v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
}
v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k];
if (v->lines_to_request_prefetch_pixel_data > 0.0) {
v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data;
if ((v->swath_height_y[k] > 4.0)) {
if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) {
v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0));
}
else {
v->v_ratio_prefetch_y[k] = 999999.0;
}
}
}
else {
v->v_ratio_prefetch_y[k] = 999999.0;
}
v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0);
if (v->lines_to_request_prefetch_pixel_data > 0.0) {
v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data;
if ((v->swath_height_c[k] > 4.0)) {
if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) {
v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0));
}
else {
v->v_ratio_prefetch_c[k] = 999999.0;
}
}
}
else {
v->v_ratio_prefetch_c[k] = 999999.0;
}
v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0);
if (v->lines_to_request_prefetch_pixel_data > 0.0) {
v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]);
}
else {
v->required_prefetch_pix_data_bw = 999999.0;
}
v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw);
if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k]));
}
if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
v->v_ratio_prefetch_more_than4 = dcn_bw_yes;
}
if (v->destination_lines_for_prefetch[k] < 2.0) {
v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
}
if (v->max_vstartup_lines[k] > v->v_startup_lines) {
if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) {
v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no;
}
if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes;
}
if (v->destination_lines_for_prefetch[k] < 2.0) {
v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
}
}
}
if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) {
v->prefetch_mode_supported = dcn_bw_yes;
}
else {
v->prefetch_mode_supported = dcn_bw_no;
}
v->v_startup_lines = v->v_startup_lines + 1.0;
} while (!(v->prefetch_mode_supported == dcn_bw_yes || (v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no)));
v->next_prefetch_mode = v->next_prefetch_mode + 1.0;
} while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0));
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->v_ratio_prefetch_y[k] <= 1.0) {
v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
}
else {
v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
}
if (v->byte_per_pixel_detc[k] == 0.0) {
v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0;
}
else {
if (v->v_ratio_prefetch_c[k] <= 1.0) {
v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
}
else {
v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
}
}
}
/*min ttuv_blank*/
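/*
 * Minimum TTU vblank per plane follows the prefetch mode chosen above:
 * mode 0 covers the dram clock change, stutter and urgent watermarks,
 * mode 1 only stutter and urgent, mode 2 only urgent.
 */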
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->prefetch_mode == 0.0) {
v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes;
v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
}
else if (v->prefetch_mode == 1.0) {
v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
}
else {
v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no;
v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark;
}
}
/*nb p-state/dram clock change support*/
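/*
 * P-state change support: the latency hiding available while a plane is
 * active (line buffer, DET and DPP/OPP output buffering, reduced when more
 * than one DPP is active) and during vblank is compared against the dram
 * clock change watermark, then support is classified as v_active, v_blank
 * or not supported.
 */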
v->active_dp_ps = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k];
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]);
v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]);
if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) {
v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k];
}
else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) {
v->dpp_output_buffer_lines_y = 0.5;
}
else {
v->dpp_output_buffer_lines_y = 1.0;
}
if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) {
v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0);
}
else if (v->swath_width_y[k] / 2.0 > v->dpp_output_buffer_pixels) {
v->dpp_output_buffer_lines_c = 0.5;
}
else {
v->dpp_output_buffer_lines_c = 1.0;
}
v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines);
v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark;
if (v->active_dp_ps > 1.0) {
v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
}
if (v->byte_per_pixel_detc[k] > 0.0) {
v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines);
v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark;
if (v->active_dp_ps > 1.0) {
v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
}
v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c);
}
else {
v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y;
}
if (v->output_format[k] == dcn_bw_444) {
v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark;
}
else {
v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark;
}
if (v->output[k] == dcn_bw_writeback) {
v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin);
}
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) {
v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark);
}
else {
v->v_blank_dram_clock_change_latency_margin[k] = 0.0;
}
}
v->min_active_dram_clock_change_margin = 999999.0;
v->v_blank_of_min_active_dram_clock_change_margin = 999999.0;
v->second_min_active_dram_clock_change_margin = 999999.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) {
v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin;
v->min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
}
else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) {
v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
}
}
v->min_vblank_dram_clock_change_margin = 999999.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) {
v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
}
}
if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin);
}
else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) {
v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin);
}
else {
v->dram_clock_change_margin = v->min_active_dram_clock_change_margin;
}
if (v->min_active_dram_clock_change_margin > 0.0) {
v->dram_clock_change_support = dcn_bw_supported_in_v_active;
}
else if (v->dram_clock_change_margin > 0.0) {
v->dram_clock_change_support = dcn_bw_supported_in_v_blank;
}
else {
v->dram_clock_change_support = dcn_bw_not_supported;
}
/*maximum bandwidth used*/
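/*
 * Peak bandwidth is the read bandwidth from the prefetch calculation plus
 * writeback output, at 4 bytes per pixel for 444 and 1.5 for 420.
 */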
v->wr_bandwidth = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
}
else if (v->output[k] == dcn_bw_writeback) {
v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
}
}
v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "os_types.h"
#include "dcn_calc_math.h"
#define isNaN(number) ((number) != (number))
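/* NaN is the only value that compares unequal to itself, so this detects NaN without pulling in math.h. */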
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
float dcn_bw_mod(const float arg1, const float arg2)
{
if (isNaN(arg1))
return arg2;
if (isNaN(arg2))
return arg1;
return arg1 - arg2 * ((int) (arg1 / arg2)); /* truncated-division remainder */
}
float dcn_bw_min2(const float arg1, const float arg2)
{
if (isNaN(arg1))
return arg2;
if (isNaN(arg2))
return arg1;
return arg1 < arg2 ? arg1 : arg2;
}
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
{
return arg1 > arg2 ? arg1 : arg2;
}
float dcn_bw_max2(const float arg1, const float arg2)
{
if (isNaN(arg1))
return arg2;
if (isNaN(arg2))
return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
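/*
 * The floor/ceil helpers below truncate toward zero and use a 0.99999
 * epsilon rather than a true ceiling, which is adequate for the positive,
 * modest-magnitude values this model feeds them.  For example,
 * dcn_bw_ceil2(5.2, 2.0) = ((int)(5.2 / 2.0 + 0.99999)) * 2.0 = 6.0.
 */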
float dcn_bw_floor2(const float arg, const float significance)
{
ASSERT(significance != 0);
return ((int) (arg / significance)) * significance;
}
float dcn_bw_floor(const float arg)
{
return ((int) (arg));
}
float dcn_bw_ceil(const float arg)
{
return (int) (arg + 0.99999);
}
float dcn_bw_ceil2(const float arg, const float significance)
{
ASSERT(significance != 0);
return ((int) (arg / significance + 0.99999)) * significance;
}
float dcn_bw_max3(float v1, float v2, float v3)
{
return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2);
}
float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
{
return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5);
}
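/*
 * Recursive exponentiation by squaring; only the integer part of exp is
 * used, and negative integer exponents are handled by the final division
 * by a.
 */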
float dcn_bw_pow(float a, float exp)
{
float temp;
/*ASSERT(exp == (int)exp);*/
if ((int)exp == 0)
return 1;
temp = dcn_bw_pow(a, (int)(exp / 2));
if (((int)exp % 2) == 0) {
return temp * temp;
} else {
if ((int)exp > 0)
return a * temp * temp;
else
return (temp * temp) / a;
}
}
double dcn_bw_fabs(double a)
{
if (a > 0)
return (a);
else
return (-a);
}
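/*
 * Fast approximate logarithm: reinterpret the 32-bit float to extract its
 * binary exponent, rebase the mantissa into [1,2), approximate log2 of the
 * mantissa with a quadratic, and convert to base b by dividing by log2(b)
 * unless b is (nearly) 2.  The accuracy is modest, which is presumably
 * sufficient for the PTE sizing math that calls it.
 */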
float dcn_bw_log(float a, float b)
{
int * const exp_ptr = (int *)(&a);
int x = *exp_ptr;
const int log_2 = ((x >> 23) & 255) - 128;
x &= ~(255 << 23);
x += 127 << 23;
*exp_ptr = x;
a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
if (b > 2.00001 || b < 1.99999)
return (a + log_2) / dcn_bw_log(b, 2);
else
return (a + log_2);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "resource.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "dc.h"
#include "core_types.h"
#include "dal_asic_id.h"
#include "calcs_logger.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
/*******************************************************************************
* Private Functions
******************************************************************************/
static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
{
switch (asic_id.chip_family) {
case FAMILY_CZ:
if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_STONEY;
return BW_CALCS_VERSION_CARRIZO;
case FAMILY_VI:
if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS12;
if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS10;
if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS11;
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_VEGAM;
return BW_CALCS_VERSION_INVALID;
case FAMILY_AI:
return BW_CALCS_VERSION_VEGA10;
default:
return BW_CALCS_VERSION_INVALID;
}
}
static void calculate_bandwidth(
const struct bw_calcs_dceip *dceip,
const struct bw_calcs_vbios *vbios,
struct bw_calcs_data *data)
{
const int32_t pixels_per_chunk = 512;
const int32_t high = 2;
const int32_t mid = 1;
const int32_t low = 0;
const uint32_t s_low = 0;
const uint32_t s_mid1 = 1;
const uint32_t s_mid2 = 2;
const uint32_t s_mid3 = 3;
const uint32_t s_mid4 = 4;
const uint32_t s_mid5 = 5;
const uint32_t s_mid6 = 6;
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
uint32_t max_chunks_fbc_mode;
int32_t num_cursor_lines;
int32_t i, j, k;
struct bw_fixed *yclk;
struct bw_fixed *sclk;
bool d0_underlay_enable;
bool d1_underlay_enable;
bool fbc_enabled;
bool lpt_enabled;
enum bw_defines sclk_message;
enum bw_defines yclk_message;
enum bw_defines *tiling_mode;
enum bw_defines *surface_type;
enum bw_defines voltage;
enum bw_defines pipe_check;
enum bw_defines hsr_check;
enum bw_defines vsr_check;
enum bw_defines lb_size_check;
enum bw_defines fbc_check;
enum bw_defines rotation_check;
enum bw_defines mode_check;
enum bw_defines nbp_state_change_enable_blank;
/*initialize variables*/
int32_t number_of_displays_enabled = 0;
int32_t number_of_displays_enabled_with_margin = 0;
int32_t number_of_aligned_displays_with_no_margin = 0;
yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL);
if (!yclk)
return;
sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL);
if (!sclk)
goto free_yclk;
tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL);
if (!tiling_mode)
goto free_sclk;
surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL);
if (!surface_type)
goto free_tiling_mode;
yclk[low] = vbios->low_yclk;
yclk[mid] = vbios->mid_yclk;
yclk[high] = vbios->high_yclk;
sclk[s_low] = vbios->low_sclk;
sclk[s_mid1] = vbios->mid1_sclk;
sclk[s_mid2] = vbios->mid2_sclk;
sclk[s_mid3] = vbios->mid3_sclk;
sclk[s_mid4] = vbios->mid4_sclk;
sclk[s_mid5] = vbios->mid5_sclk;
sclk[s_mid6] = vbios->mid6_sclk;
sclk[s_high] = vbios->high_sclk;
/*''''''''''''''''''*/
/* surface assignment:*/
/* 0: d0 underlay or underlay luma*/
/* 1: d0 underlay chroma*/
/* 2: d1 underlay or underlay luma*/
/* 3: d1 underlay chroma*/
/* 4: d0 graphics*/
/* 5: d1 graphics*/
/* 6: d2 graphics*/
/* 7: d3 graphics, same mode as d2*/
/* 8: d4 graphics, same mode as d2*/
/* 9: d5 graphics, same mode as d2*/
/* ...*/
/* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
/* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
/* underlay luma and chroma surface parameters from spreadsheet*/
if (data->d0_underlay_mode == bw_def_none)
d0_underlay_enable = false;
else
d0_underlay_enable = true;
if (data->d1_underlay_mode == bw_def_none)
d1_underlay_enable = false;
else
d1_underlay_enable = true;
data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
switch (data->underlay_surface_type) {
case bw_def_420:
surface_type[0] = bw_def_underlay420_luma;
surface_type[2] = bw_def_underlay420_luma;
data->bytes_per_pixel[0] = 1;
data->bytes_per_pixel[2] = 1;
surface_type[1] = bw_def_underlay420_chroma;
surface_type[3] = bw_def_underlay420_chroma;
data->bytes_per_pixel[1] = 2;
data->bytes_per_pixel[3] = 2;
data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
break;
case bw_def_422:
surface_type[0] = bw_def_underlay422;
surface_type[2] = bw_def_underlay422;
data->bytes_per_pixel[0] = 2;
data->bytes_per_pixel[2] = 2;
data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
break;
default:
surface_type[0] = bw_def_underlay444;
surface_type[2] = bw_def_underlay444;
data->bytes_per_pixel[0] = 4;
data->bytes_per_pixel[2] = 4;
data->lb_size_per_component[0] = dceip->lb_size_per_component444;
data->lb_size_per_component[2] = dceip->lb_size_per_component444;
break;
}
if (d0_underlay_enable) {
switch (data->underlay_surface_type) {
case bw_def_420:
data->enable[0] = 1;
data->enable[1] = 1;
break;
default:
data->enable[0] = 1;
data->enable[1] = 0;
break;
}
}
else {
data->enable[0] = 0;
data->enable[1] = 0;
}
if (d1_underlay_enable) {
switch (data->underlay_surface_type) {
case bw_def_420:
data->enable[2] = 1;
data->enable[3] = 1;
break;
default:
data->enable[2] = 1;
data->enable[3] = 0;
break;
}
}
else {
data->enable[2] = 0;
data->enable[3] = 0;
}
data->use_alpha[0] = 0;
data->use_alpha[1] = 0;
data->use_alpha[2] = 0;
data->use_alpha[3] = 0;
data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
/*underlay0 same as graphics display pipe0*/
data->interlace_mode[0] = data->interlace_mode[4];
data->interlace_mode[1] = data->interlace_mode[4];
/*underlay1 same as graphics display pipe1*/
data->interlace_mode[2] = data->interlace_mode[5];
data->interlace_mode[3] = data->interlace_mode[5];
/*underlay0 same as graphics display pipe0*/
data->h_total[0] = data->h_total[4];
data->v_total[0] = data->v_total[4];
data->h_total[1] = data->h_total[4];
data->v_total[1] = data->v_total[4];
/*underlay1 same as graphics display pipe1*/
data->h_total[2] = data->h_total[5];
data->v_total[2] = data->v_total[5];
data->h_total[3] = data->h_total[5];
data->v_total[3] = data->v_total[5];
/*underlay0 same as graphics display pipe0*/
data->pixel_rate[0] = data->pixel_rate[4];
data->pixel_rate[1] = data->pixel_rate[4];
/*underlay1 same as graphics display pipe1*/
data->pixel_rate[2] = data->pixel_rate[5];
data->pixel_rate[3] = data->pixel_rate[5];
if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
tiling_mode[0] = bw_def_linear;
tiling_mode[1] = bw_def_linear;
tiling_mode[2] = bw_def_linear;
tiling_mode[3] = bw_def_linear;
}
else {
tiling_mode[0] = bw_def_landscape;
tiling_mode[1] = bw_def_landscape;
tiling_mode[2] = bw_def_landscape;
tiling_mode[3] = bw_def_landscape;
}
data->lb_bpc[0] = data->underlay_lb_bpc;
data->lb_bpc[1] = data->underlay_lb_bpc;
data->lb_bpc[2] = data->underlay_lb_bpc;
data->lb_bpc[3] = data->underlay_lb_bpc;
data->compression_rate[0] = bw_int_to_fixed(1);
data->compression_rate[1] = bw_int_to_fixed(1);
data->compression_rate[2] = bw_int_to_fixed(1);
data->compression_rate[3] = bw_int_to_fixed(1);
data->access_one_channel_only[0] = 0;
data->access_one_channel_only[1] = 0;
data->access_one_channel_only[2] = 0;
data->access_one_channel_only[3] = 0;
data->cursor_width_pixels[0] = bw_int_to_fixed(0);
data->cursor_width_pixels[1] = bw_int_to_fixed(0);
data->cursor_width_pixels[2] = bw_int_to_fixed(0);
data->cursor_width_pixels[3] = bw_int_to_fixed(0);
/* graphics surface parameters from spreadsheet*/
fbc_enabled = false;
lpt_enabled = false;
for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
if (i < data->number_of_displays + 4) {
if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
data->enable[i] = 0;
data->use_alpha[i] = 0;
}
else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
data->enable[i] = 1;
data->use_alpha[i] = 1;
}
else if (i == 4) {
data->enable[i] = 1;
data->use_alpha[i] = 0;
}
else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
data->enable[i] = 0;
data->use_alpha[i] = 0;
}
else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
data->enable[i] = 1;
data->use_alpha[i] = 1;
}
else {
data->enable[i] = 1;
data->use_alpha[i] = 0;
}
}
else {
data->enable[i] = 0;
data->use_alpha[i] = 0;
}
data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
surface_type[i] = bw_def_graphics;
data->lb_size_per_component[i] = dceip->lb_size_per_component444;
if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
tiling_mode[i] = bw_def_linear;
}
else {
tiling_mode[i] = bw_def_tiled;
}
data->lb_bpc[i] = data->graphics_lb_bpc;
if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
data->access_one_channel_only[i] = data->lpt_en[i];
}
else {
data->compression_rate[i] = bw_int_to_fixed(1);
data->access_one_channel_only[i] = 0;
}
if (data->fbc_en[i] == 1) {
fbc_enabled = true;
if (data->lpt_en[i] == 1) {
lpt_enabled = true;
}
}
data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
}
/* display_write_back420*/
data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
if (data->d1_display_write_back_dwb_enable == 1) {
data->enable[maximum_number_of_surfaces - 2] = 1;
data->enable[maximum_number_of_surfaces - 1] = 1;
}
else {
data->enable[maximum_number_of_surfaces - 2] = 0;
data->enable[maximum_number_of_surfaces - 1] = 0;
}
surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
/*assume display pipe1 has dwb enabled*/
data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
data->use_alpha[maximum_number_of_surfaces - 2] = 0;
data->use_alpha[maximum_number_of_surfaces - 1] = 0;
/*mode check calculations:*/
/* mode within dce ip capabilities*/
/* fbc*/
/* hsr*/
/* vsr*/
/* lb size*/
/*effective scaling source and ratios:*/
/*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
/*420 chroma has half the width, height, and horizontal and vertical scaling ratios of luma*/
/*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
/*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
/*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
/*in interlace mode there is 2:1 vertical downscaling for each field*/
/*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
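/*illustrative worked example of the adjustments above (all values assumed, not taken from a real mode):*/
/*a 420 chroma plane recorded as 3840x2160 with hsr = 2 and vsr = 1 is treated as 1920x1080 with hsr = 1 and vsr = 0.5;*/
/*a 90 degree rotation then swaps it to 1080x1920 with hsr = 0.5 and vsr = 1;*/
/*side-by-side stereo would double the effective source width and top-bottom stereo the effective source height*/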
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
data->h_taps[i] = bw_int_to_fixed(1);
data->v_taps[i] = bw_int_to_fixed(1);
}
if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
}
else {
data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
data->src_width_after_surface_type = data->src_width[i];
data->src_height_after_surface_type = data->src_height[i];
data->hsr_after_surface_type = data->h_scale_ratio[i];
data->vsr_after_surface_type = data->v_scale_ratio[i];
}
if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->src_width_after_rotation = data->src_height_after_surface_type;
data->src_height_after_rotation = data->src_width_after_surface_type;
data->hsr_after_rotation = data->vsr_after_surface_type;
data->vsr_after_rotation = data->hsr_after_surface_type;
}
else {
data->src_width_after_rotation = data->src_width_after_surface_type;
data->src_height_after_rotation = data->src_height_after_surface_type;
data->hsr_after_rotation = data->hsr_after_surface_type;
data->vsr_after_rotation = data->vsr_after_surface_type;
}
switch (data->stereo_mode[i]) {
case bw_def_top_bottom:
data->source_width_pixels[i] = data->src_width_after_rotation;
data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
data->hsr_after_stereo = data->hsr_after_rotation;
data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
break;
case bw_def_side_by_side:
data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
data->source_height_pixels = data->src_height_after_rotation;
data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
data->vsr_after_stereo = data->vsr_after_rotation;
break;
default:
data->source_width_pixels[i] = data->src_width_after_rotation;
data->source_height_pixels = data->src_height_after_rotation;
data->hsr_after_stereo = data->hsr_after_rotation;
data->vsr_after_stereo = data->vsr_after_rotation;
break;
}
data->hsr[i] = data->hsr_after_stereo;
if (data->interlace_mode[i]) {
data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
}
else {
data->vsr[i] = data->vsr_after_stereo;
}
if (data->panning_and_bezel_adjustment != bw_def_none) {
data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
}
else {
data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
}
data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
}
}
/*mode support checks:*/
/*the number of graphics and underlay pipes is limited by the ip support*/
/*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
/*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
/*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
/*the number of lines in the line buffer has to exceed the number of vertical taps*/
/*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
/*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
/*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 24.011719 / 3, rounded up to a multiple of 48*/
/*frame buffer compression is not supported with stereo mode, rotation, or non-888 formats*/
/*rotation is not supported with linear or stereo modes*/
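/*illustrative line buffer sizing example (numbers assumed): for an 8 bit per component surface whose effective source width*/
/*in the lb is 1920 pixels, the lb line pitch is ceil(1920, 8) * (24.011719 / 3) = 1920 * 8.004 = 15367.5, rounded up to a*/
/*multiple of 48, i.e. 15408; the number of lb partitions is then the floor of lb_size_per_component / 15408, clamped to the*/
/*per-pipe maximum, and must be at least v_taps + 1 for the lb size check to pass*/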
if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
pipe_check = bw_def_ok;
}
else {
pipe_check = bw_def_notok;
}
hsr_check = bw_def_ok;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
hsr_check = bw_def_hsr_mtn_4;
}
else {
if (bw_mtn(data->hsr[i], data->h_taps[i])) {
hsr_check = bw_def_hsr_mtn_h_taps;
}
else {
if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
}
}
}
}
}
}
vsr_check = bw_def_ok;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
vsr_check = bw_def_vsr_mtn_4;
}
else {
if (bw_mtn(data->vsr[i], data->v_taps[i])) {
vsr_check = bw_def_vsr_mtn_v_taps;
}
}
}
}
}
lb_size_check = bw_def_ok;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
}
else {
data->source_width_in_lb = data->source_width_pixels[i];
}
switch (data->lb_bpc[i]) {
case 8:
data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
break;
case 10:
data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
break;
default:
data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
break;
}
data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
/*clamp the partitions to the maximum number supported by the lb*/
if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
data->lb_partitions_max[i] = bw_int_to_fixed(10);
}
else {
data->lb_partitions_max[i] = bw_int_to_fixed(7);
}
data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
lb_size_check = bw_def_notok;
}
}
}
fbc_check = bw_def_ok;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
}
}
rotation_check = bw_def_ok;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
rotation_check = bw_def_invalid_linear_or_stereo_mode;
}
}
}
if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
mode_check = bw_def_ok;
}
else {
mode_check = bw_def_notok;
}
/*number of memory channels for write-back client*/
data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
data->number_of_dram_channels = vbios->number_of_dram_channels;
/*modify number of memory channels if lpt mode is enabled*/
/* low power tiling mode register*/
/* 0 = use channel 0*/
/* 1 = use channel 0 and 1*/
/* 2 = use channel 0,1,2,3*/
if ((fbc_enabled == 1 && lpt_enabled == 1)) {
if (vbios->memory_type == bw_def_hbm)
data->dram_efficiency = bw_frc_to_fixed(5, 10);
else
data->dram_efficiency = bw_int_to_fixed(1);
if (dceip->low_power_tiling_mode == 0) {
data->number_of_dram_channels = 1;
}
else if (dceip->low_power_tiling_mode == 1) {
data->number_of_dram_channels = 2;
}
else if (dceip->low_power_tiling_mode == 2) {
data->number_of_dram_channels = 4;
}
else {
data->number_of_dram_channels = 1;
}
}
else {
if (vbios->memory_type == bw_def_hbm)
data->dram_efficiency = bw_frc_to_fixed(5, 10);
else
data->dram_efficiency = bw_frc_to_fixed(8, 10);
}
/*memory request size and latency hiding:*/
/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
/*the display write-back requests are single line*/
/*for tiled graphics surfaces, or underlay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half of the bytes in the request size are useful in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
/*for underlay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
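/*worked example of the request sizing rules above (assumed tiled 4 bpp graphics surface): with the rotation orthogonal to*/
/*the tiling grain the surface issues 32 byte requests of which only 16 bytes are useful (50% efficiency), 2-line interleaved*/
/*with 2 lines of latency hiding; without orthogonal rotation the same surface issues fully useful 64 byte requests*/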
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
if ((i < 4)) {
/*underlay portrait tiling mode is not supported*/
data->orthogonal_rotation[i] = 1;
}
else {
/*graphics portrait tiling mode*/
if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
data->orthogonal_rotation[i] = 1;
}
}
}
else {
if ((i < 4)) {
/*only landscape tiling mode is supported for underlay*/
if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
data->orthogonal_rotation[i] = 1;
}
}
else {
/*graphics landscape tiling mode*/
if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
data->orthogonal_rotation[i] = 1;
}
}
}
if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
}
else {
data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
}
if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
data->bytes_per_request[i] = bw_int_to_fixed(64);
data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
data->latency_hiding_lines[i] = bw_int_to_fixed(1);
}
else if (tiling_mode[i] == bw_def_linear) {
data->bytes_per_request[i] = bw_int_to_fixed(64);
data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
}
else {
if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
switch (data->bytes_per_pixel[i]) {
case 8:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
if (data->orthogonal_rotation[i]) {
data->bytes_per_request[i] = bw_int_to_fixed(32);
data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
}
else {
data->bytes_per_request[i] = bw_int_to_fixed(64);
data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
}
break;
case 4:
if (data->orthogonal_rotation[i]) {
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
data->bytes_per_request[i] = bw_int_to_fixed(32);
data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
}
else {
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
data->bytes_per_request[i] = bw_int_to_fixed(64);
data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
}
break;
case 2:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
data->bytes_per_request[i] = bw_int_to_fixed(32);
data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
break;
default:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
data->bytes_per_request[i] = bw_int_to_fixed(32);
data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
break;
}
}
else {
data->bytes_per_request[i] = bw_int_to_fixed(64);
data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
if (data->orthogonal_rotation[i]) {
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
data->latency_hiding_lines[i] = bw_int_to_fixed(4);
}
else {
switch (data->bytes_per_pixel[i]) {
case 4:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
data->latency_hiding_lines[i] = bw_int_to_fixed(2);
break;
case 2:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
data->latency_hiding_lines[i] = bw_int_to_fixed(4);
break;
default:
data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
data->latency_hiding_lines[i] = bw_int_to_fixed(4);
break;
}
}
}
}
}
}
/*requested peak bandwidth:*/
/*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
/*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
/*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
/**/
/*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
/*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
/**/
/*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
/*rounded up to even and divided by the line times for initialization, which is normally three.*/
/*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
/*rounded up to line pairs if not doing line buffer prefetching.*/
/**/
/*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
/*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
/**/
/*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
/*vertical scale ratio and the number of vertical taps increased by one. add one more for possible odd line*/
/*panning/bezel adjustment mode.*/
/**/
/*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
/*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
/*furthermore, there is only one line time for initialization.*/
/**/
/*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
/*the ceiling of the vertical scale ratio.*/
/**/
/*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.*/
/**/
/*the horizontal blank and chunk granularity factor is indirectly used to indicate the interval of time required to transfer the source pixels.*/
/*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
/*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/
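/*worked example of the peak request bandwidth product (all numbers assumed): with 1 lb line in per line out, a source width*/
/*rounded up to chunks of 1920, a line time (h_total / pixel rate) of 14.8 us, 4 bytes per pixel, 64 useful bytes per request*/
/*and a 2:2 ratio of lines interleaved in memory to latency hiding lines, the request bandwidth is*/
/*1 * 1920 / 14.8us * 4 / 64 * 2 / 2, roughly 8.1 million requests per second, and the peak bandwidth is that times the*/
/*64 byte request size, roughly 520 mbyte/s*/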
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
}
if (data->stereo_mode[i] == bw_def_top_bottom) {
data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
}
if (data->stereo_mode[i] == bw_def_top_bottom) {
data->num_lines_at_frame_start = bw_int_to_fixed(1);
}
else {
data->num_lines_at_frame_start = bw_int_to_fixed(3);
}
if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
data->line_buffer_prefetch[i] = 0;
}
else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
data->line_buffer_prefetch[i] = 1;
}
else {
data->line_buffer_prefetch[i] = 0;
}
data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
if (data->line_buffer_prefetch[i] == 1) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
}
else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
} else if (bw_leq(data->vsr[i],
bw_frc_to_fixed(4, 3))) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
} else if (bw_leq(data->vsr[i],
bw_frc_to_fixed(6, 4))) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
}
else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
}
else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
}
else {
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
}
if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
}
else {
data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
}
data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
}
}
/*outstanding chunk request limit*/
/*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
/*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
/**/
/*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
/**/
/*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
/*and underlay.*/
/**/
/*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
/*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
/*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
/*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
/*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
/*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
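/*illustrative chunk limit example (assumed 256 pixel chunk width): a tiled 4 bpp graphics surface with 2-line memory*/
/*interleave has a memory chunk size of 256 * 2 * 4 = 2048 bytes and the same pipe chunk size, so an adjusted data buffer*/
/*size of 12288 bytes gives ceil(12288 / 2048) = 6 as the outstanding chunk request limit, before the per-pipe clamping*/
/*applied below*/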
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
}
else {
data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
}
}
if (data->fbc_en[i] == 1) {
max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
switch (surface_type[i]) {
case bw_def_display_write_back420_luma:
data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
break;
case bw_def_display_write_back420_chroma:
data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
break;
case bw_def_underlay420_luma:
data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
break;
case bw_def_underlay420_chroma:
data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
break;
case bw_def_underlay422:case bw_def_underlay444:
if (data->orthogonal_rotation[i] == 0) {
data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
}
else {
data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
}
break;
default:
if (data->fbc_en[i] == 1) {
/*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
if (data->number_of_displays == 1) {
data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
}
else {
data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
}
}
else {
/*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
if (data->number_of_displays == 1) {
data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
}
else {
data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
}
}
break;
}
if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
}
else {
data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
}
}
}
data->min_dmif_size_in_time = bw_int_to_fixed(9999);
data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
}
}
else {
if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
}
}
}
}
data->total_requests_for_dmif_size = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
}
else {
data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
}
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) {
/*set maximum chunk limit if only one graphics pipe is enabled*/
data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
}
else {
data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
/*clamp maximum chunk limit in the graphics display pipe*/
if (i >= 4) {
data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
}
}
}
}
/*outstanding pte request limit*/
/*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
/*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
/*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
/*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
/*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
/*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
/*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
/*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/
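/*worked pte example (assumed tiled 4 bpp surface, no rotation, 1920 pixel source width, 256 pixel chunks): the*/
/*scatter-gather page is 32x32 and each pte request covers 8 useful ptes, so the pte requests per chunk are 256 / 32 / 8 = 1,*/
/*the pte requests in a row are ceil(1920 / 256 * 1) * row height / page height = 8, and the pte requests in the vblank are*/
/*that times the number of pte request rows*/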
if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
}
else {
data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
if (tiling_mode[i] == bw_def_linear) {
data->useful_pte_per_pte_request = bw_int_to_fixed(8);
data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
}
else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
data->useful_pte_per_pte_request = bw_int_to_fixed(8);
switch (data->bytes_per_pixel[i]) {
case 4:
data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
break;
case 2:
data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
break;
default:
data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
break;
}
data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
data->scatter_gather_row_height = data->scatter_gather_page_height[i];
}
else {
data->useful_pte_per_pte_request = bw_int_to_fixed(1);
switch (data->bytes_per_pixel[i]) {
case 4:
data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
break;
case 2:
data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
break;
default:
data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
break;
}
data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
data->scatter_gather_row_height = data->scatter_gather_page_height[i];
}
data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
}
else {
data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
}
}
}
/*pitch padding recommended for efficiency in linear mode*/
/*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
/*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
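/*illustrative example (assumed 8 dram banks and 2 channels): the inefficient pitch granularity is 256 * 8 * 2 = 4096 bytes,*/
/*so a linear 4 bpp surface with a 2048 pixel (8192 byte) pitch lands on an exact multiple and should be padded by at least*/
/*256 pixels, while a 1920 pixel (7680 byte) pitch is not affected*/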
/*pixel transfer time*/
/*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
/*for dmif, pte and cursor requests have to be included.*/
/*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
/*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
/*the page close-open time is determined by trc and the number of page close-opens*/
/*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
/*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
/*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/
/*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
/*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
/*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
/*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
/*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
/*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
/*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the first pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
/*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
/*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
/*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
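/*worked example of the first/last output pixel terms (values assumed): with a scaler vertical filter initialization value*/
/*of 3 the source pixels for the first output pixel are 512; had the value exceeded 4 they would be 4 times the source width;*/
/*the source pixels for the last output pixel are the source width times that value rounded up to the line interleave (or the*/
/*rounded-up vsr times the blank/chunk granularity factor, whichever is larger)*/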
data->cursor_total_data = bw_int_to_fixed(0);
data->cursor_total_request_groups = bw_int_to_fixed(0);
data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
if (dceip->large_cursor == 1) {
data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
}
else {
data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
}
if (data->scatter_gather_enable_for_pipe[i]) {
data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
}
}
}
data->tile_width_in_pixels = bw_int_to_fixed(8);
data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
}
else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
}
else {
data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
}
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
}
else {
data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
}
}
}
data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
}
}
data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
}
}
}
data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
data->total_display_writes_required_data = bw_int_to_fixed(0);
data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
/*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/
/*pseudo-channel may be read independently of one another.*/
/*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
/*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
/*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
/*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
/*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
/*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
/*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
/*the memory efficiency will be 50% for the 32 byte sized data.*/
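/*worked example (assumed 64 bit dram channel and a 32 byte request): gddr5/ddr4 always bursts 8 * 64 / 8 = 64 bytes, so the*/
/*dram access data below is scaled by ceil(64 / 32) = 2, while the hbm path applies no such multiplier*/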
if (vbios->memory_type == bw_def_hbm) {
data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
}
else {
data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
}
data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
}
else {
data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
}
}
}
data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
}
else {
if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
}
else {
data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
}
}
data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
}
}
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
if (data->d1_display_write_back_dwb_enable == 1) {
data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
}
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
for (j = 0; j <= 2; j++) {
for (k = 0; k <= 7; k++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
/*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/
/*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/
data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
/*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
/*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
/*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
/*immediately serviced without a gap in the urgent requests.*/
/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
if (surface_type[i] == bw_def_graphics) {
switch (data->lb_bpc[i]) {
case 6:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
break;
case 8:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
break;
case 10:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
break;
default:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
break;
}
if (data->use_alpha[i] == 1) {
data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
}
}
else {
switch (data->lb_bpc[i]) {
case 6:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
break;
case 8:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
break;
case 10:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
break;
default:
data->v_scaler_efficiency = bw_int_to_fixed(3);
break;
}
}
if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
}
else {
data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
}
data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
}
else {
data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
/*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
/*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
/*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
/*immediately serviced without a gap in the urgent requests.*/
/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
}
}
}
}
}
/*cpu c-state and p-state change enable*/
/*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
/*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
/*condition for the blackout duration:*/
/* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
/*condition for the blackout recovery:*/
/* recovery time > dmif burst time + 2 * urgent latency*/
/* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
/* / (dispclk - display bw)*/
/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
/*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
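/*in the code below these conditions are tracked per yclk/sclk pair as a margin and a required dispclk, roughly:*/
/*  blackout_duration_margin[i][j] = minimum_latency_hiding_with_cursor - blackout_duration - dmif_burst_time[i][j] - line_source_transfer_time*/
/*the cpu p-state change is enabled when the margin at the high yclk/sclk level is positive and the required dispclk fits under the high voltage maximum,*/
/*and the cpu c-state change additionally requires the blackout recovery dispclk to fit under that maximum*/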
/*initialize variables*/
number_of_displays_enabled = 0;
number_of_displays_enabled_with_margin = 0;
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k]) {
number_of_displays_enabled = number_of_displays_enabled + 1;
}
data->display_pstate_change_enable[k] = 0;
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
}
else {
data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
}
}
else {
data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
}
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
if (number_of_displays_enabled > 2)
data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
else
data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
}
else {
data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
}
data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
}
}
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
}
else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
}
}
else {
data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
}
else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
}
}
}
}
}
}
if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
data->cpup_state_change_enable = bw_def_yes;
if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
data->cpuc_state_change_enable = bw_def_yes;
}
else {
data->cpuc_state_change_enable = bw_def_no;
}
}
else {
data->cpup_state_change_enable = bw_def_no;
data->cpuc_state_change_enable = bw_def_no;
}
/*nb p-state change enable*/
/*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
/*below the maximum.*/
/*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
/*minus the dmif burst time, minus the source line transfer time*/
/*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
/*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
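/*with the per-surface quantities computed below, the margin checked for each yclk/sclk pair is roughly:*/
/*  dram_speed_change_margin = maximum_latency_hiding_with_cursor - nbp_state_change_latency - dmif_burst_time - dram_speed_change_line_source_transfer_time*/
/*(writeback surfaces additionally subtract the mcifwr burst time)*/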
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) **/
/* h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i],
bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
}
}
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
data->dram_speed_change_margin = bw_int_to_fixed(9999);
data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
data->num_displays_with_margin[i][j] = 0;
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k]) {
if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
else {
data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
}
}
}
}
/*determine the number of displays with margin to switch in the v_active region*/
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) {
number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
}
}
/*determine the number of displays that don't have any dram clock change margin, but*/
/*have the same resolution. these displays can switch in a common vblank region if*/
/*their frames are aligned.*/
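/*per surface, the vblank margin computed below is roughly:*/
/*  v_blank_dram_speed_change_margin = (v_total - src_height / v_scale_ratio + 4) * h_total / pixel_rate*/
/*                                     - nbp_state_change_latency - dmif_burst_time[low][s_low] - dram_speed_change_line_source_transfer_time[low][s_low]*/
/*(writeback surfaces additionally subtract the mcifwr burst time)*/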
data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k]) {
if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
}
else {
data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
}
}
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
data->displays_with_same_mode[i] = bw_int_to_fixed(0);
if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
}
}
}
}
/*compute the maximum number of aligned displays with no margin*/
number_of_aligned_displays_with_no_margin = 0;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
}
/*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
/*aligned displays with the same timing.*/
/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
/*displays are in v_blank or v_active.*/
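/*as a condition on the quantities computed above, this is roughly:*/
/*  nbp_state_change_enable = yes when displays_with_margin + aligned_displays_with_no_margin == displays_enabled,*/
/*  the minimum margin at the high yclk/sclk level is positive (and finite) and the required dispclk is below the high voltage maximum*/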
if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
data->nbp_state_change_enable = bw_def_yes;
}
else {
data->nbp_state_change_enable = bw_def_no;
}
/*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) {
nbp_state_change_enable_blank = bw_def_yes;
}
else {
nbp_state_change_enable_blank = bw_def_no;
}
/*average bandwidth*/
/*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel, divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
/*the average bandwidth with compression is the same, divided by the compression ratio*/
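/*restated with the variables used below, per enabled surface this is roughly:*/
/*  average_bandwidth_no_compression = (source_width_rounded_up_to_chunks * bytes_per_pixel / (h_total / pixel_rate)) * vsr * bytes_per_request / useful_bytes_per_request*/
/*  average_bandwidth = average_bandwidth_no_compression / compression_rate*/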
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
}
}
data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
data->total_average_bandwidth = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
}
}
/*required yclk(pclk)*/
/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
/*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
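/*at a given yclk level, the available dram bandwidth used in the checks below is roughly:*/
/*  dram_bandwidth = dram_efficiency * yclk * dram_channel_width_in_bits / 8 * number_of_dram_channels*/
/*and the required dram bandwidth is the larger of the dmif and mcifwr requirements, expressed in gbyte per second*/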
data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
/* number of cursor lines stored in the cursor data return buffer*/
num_cursor_lines = 0;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
/*compute number of cursor lines stored in data return buffer*/
if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
num_cursor_lines = 4;
}
else {
num_cursor_lines = 2;
}
data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
}
}
}
/*compute minimum time to read one chunk from the dmif buffer*/
if (number_of_displays_enabled > 2) {
data->chunk_request_delay = 0;
}
else {
data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
}
data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
yclk_message = bw_def_exceeded_allowed_page_close_open;
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else {
data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->low_yclk);
data->y_clk_level = low;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->mid_yclk);
data->y_clk_level = mid;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
yclk_message = bw_fixed_to_int(vbios->high_yclk);
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else {
yclk_message = bw_def_exceeded_allowed_maximum_bw;
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
}
/*required sclk*/
/*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer sizes through the sclk bus in the time for data transfer*/
/*for dmif, pte and cursor requests have to be included.*/
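/*restated with the variables computed next, roughly:*/
/*  dmif_required_sclk = (total_display_reads_required_data / display_reads_time_for_data_transfer) / (data_return_bus_width * percent_of_ideal_port_bw_received_after_urgent_latency / 100)*/
/*  mcifwr_required_sclk = (total_display_writes_required_data / display_writes_time_for_data_transfer) / data_return_bus_width*/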
data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
data->required_sclk = bw_int_to_fixed(9999);
sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
data->sclk_level = s_high;
}
else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
data->required_sclk = bw_int_to_fixed(9999);
sclk_message = bw_def_exceeded_allowed_page_close_open;
data->sclk_level = s_high;
}
else {
data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_low;
data->sclk_level = s_low;
data->required_sclk = vbios->low_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid1;
data->required_sclk = vbios->mid1_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid2;
data->required_sclk = vbios->mid2_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid3;
data->required_sclk = vbios->mid3_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid4;
data->required_sclk = vbios->mid4_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid5;
data->required_sclk = vbios->mid5_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid6;
data->required_sclk = vbios->mid6_sclk;
}
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;
}
else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;
}
else {
sclk_message = bw_def_exceeded_allowed_maximum_sclk;
data->sclk_level = s_high;
/*required_sclk = high_sclk*/
}
}
/*dispclk*/
/*if dispclk is set to the maximum, ramping is not required. dispclk required without ramping is less than the dispclk required with ramping.*/
/*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
/*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
/*if that does not happen either, dispclk required is the dispclk required with ramping.*/
/*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
/*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
/*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out are not 2 or 4.*/
/*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
/*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
/*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
/*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
/*the scaling limits factor itself is also clamped to at least 1*/
/*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
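/*per enabled surface, the two dispclk requirements computed below are roughly:*/
/*  dispclk_required_without_ramping = downspread_factor * max(pixel_rate * scaler_limits_factor, display_pipe_throughput_factor * display_pipe_pixel_throughput)*/
/*  dispclk_required_with_ramping = dispclk_ramping_factor * max(pixel_rate * scaler_limits_factor, display_pipe_pixel_throughput)*/
/*where downspread_factor = 1 + down_spread_percentage / 100*/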
data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] == bw_def_graphics) {
switch (data->lb_bpc[i]) {
case 6:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
break;
case 8:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
break;
case 10:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
break;
default:
data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
break;
}
if (data->use_alpha[i] == 1) {
data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
}
}
else {
switch (data->lb_bpc[i]) {
case 6:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
break;
case 8:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
break;
case 10:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
break;
default:
data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
break;
}
}
if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
}
else {
data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
}
data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
}
}
data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
}
if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
}
}
}
data->total_read_request_bandwidth = bw_int_to_fixed(0);
data->total_write_request_bandwidth = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
}
else {
data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
}
}
}
data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
if (data->cpuc_state_change_enable == bw_def_yes) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
}
if (data->cpup_state_change_enable == bw_def_yes) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
}
if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
}
if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
}
else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
data->dispclk = vbios->high_voltage_max_dispclk;
}
else {
data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
}
/* required core voltage*/
/* the core voltage required is low if sclk, yclk (pclk) and dispclk are within the low limits*/
/* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
/* otherwise, the core voltage required is high if the three clocks are within the high limits*/
/* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
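/* in terms of the levels selected above, this is roughly:*/
/*  0.72v when yclk is at the low level, sclk is at the low level and dispclk is below low_voltage_max_dispclk*/
/*  0.8v when yclk is at the low or mid level, sclk is at the low or mid level and dispclk is below mid_voltage_max_dispclk*/
/*  0.9v (or high with no nbp state change) when all three clocks fit within the high limits*/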
if (pipe_check == bw_def_notok) {
voltage = bw_def_na;
}
else if (mode_check == bw_def_notok) {
voltage = bw_def_notok;
}
else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
voltage = bw_def_0_72;
}
else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
voltage = bw_def_0_8;
}
else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
voltage = bw_def_high_no_nbp_state_change;
}
else {
voltage = bw_def_0_9;
}
}
else {
voltage = bw_def_notok;
}
if (voltage == bw_def_0_72) {
data->max_phyclk = vbios->low_voltage_max_phyclk;
}
else if (voltage == bw_def_0_8) {
data->max_phyclk = vbios->mid_voltage_max_phyclk;
}
else {
data->max_phyclk = vbios->high_voltage_max_phyclk;
}
/*required blackout recovery time*/
data->blackout_recovery_time = bw_int_to_fixed(0);
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
}
}
else {
data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
}
}
}
}
/*sclk deep sleep*/
/*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
/*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
/*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
/*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
/*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
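/*combining the statements above, the value computed below is roughly:*/
/*  sclk_deep_sleep = max(dispclk * 1.15 / min_pixels_per_data_fifo_entry, total_read_request_bandwidth)*/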
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
}
else if (surface_type[i] == bw_def_graphics) {
data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
}
else if (data->orthogonal_rotation[i] == 0) {
data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
}
else {
data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
}
}
}
data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
}
}
}
data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
/*urgent, stutter and nb-p_state watermark*/
/*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
/*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. it does not apply to the writeback.*/
/*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
/*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
/*blackout_duration is added to the urgent watermark*/
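/*
 * illustrative form of the watermarks computed below (comment-only sketch for
 * the dmif read case; the write-back case swaps in the mcifwr urgent latency
 * and burst time):
 *   urgent_wm       = total_dmifmc_urgent_latency + dmif_burst_time
 *                     + max(line_source_pixels_transfer_time, line_source_transfer_time)
 *                     + blackout_duration + chunk_request_time + cursor_request_time
 *   stutter_exit_wm = urgent_wm + stutter_self_refresh_exit_latency - total_dmifmc_urgent_latency
 *   nbp_state_wm    = nbp_state_change_latency + dmif_burst_time
 *                     + max(line_source_pixels_transfer_time, dram_speed_change_line_source_transfer_time)
 *                     (forced to a large constant, 131000, when p-state change is not enabled for the pipe)
 */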
data->chunk_request_time = bw_int_to_fixed(0);
data->cursor_request_time = bw_int_to_fixed(0);
/*compute total time to request one chunk from each active display pipe*/
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
}
}
/*compute total time to request cursor data*/
data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
/*unconditionally remove blackout time from the nb p_state watermark*/
if (data->display_pstate_change_enable[i] == 1) {
data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
}
else {
/*maximize the watermark to force the switch in the v_blank region of the frame*/
data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
}
}
else {
data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
if (data->display_pstate_change_enable[i] == 1) {
data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
}
else {
/*maximize the watermark to force the switch in the v_blank region of the frame*/
data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
}
}
}
}
/*stutter mode enable*/
/*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the display pipe.*/
data->stutter_mode_enable = data->cpuc_state_change_enable;
if (data->number_of_displays > 1) {
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
data->stutter_mode_enable = bw_def_no;
}
}
}
}
/*performance metrics*/
/* display read access efficiency (%)*/
/* display write back access efficiency (%)*/
/* stutter efficiency (%)*/
/* extra underlay pitch recommended for efficiency (pixels)*/
/* immediate flip time (us)*/
/* latency for other clients due to urgent display read (us)*/
/* latency for other clients due to urgent display write (us)*/
/* average bandwidth consumed by display (no compression) (gb/s)*/
/* required dram bandwidth (gb/s)*/
/* required sclk (m_hz)*/
/* required rd urgent latency (us)*/
/* nb p-state change margin (us)*/
/*dmif and mcifwr dram access efficiency*/
/*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
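/*
 * in effect (comment-only sketch):
 *   dmifdram_access_efficiency   = min((total_display_reads_required_dram_access_data / dram_bandwidth)
 *                                      / dmif_total_page_close_open_time, 1)
 *   mcifwrdram_access_efficiency = the same ratio for writes, or 0 when there is no write-back data
 */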
data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
}
else {
data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
}
/*stutter efficiency*/
/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. it only applies if the display write-back is not enabled.*/
/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
/*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
/*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
/*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
/*compute the time to read all the data from the dmif buffer to the lb (dram refresh period)*/
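/*
 * comment-only sketch of the computation below, assuming a 60 hz frame
 * (16666.67 us, the constant used in the code):
 *   stutter_burst_time = total_stutter_dmif_buffer_size / (sclk * data_return_bus_width)
 *   stutter_efficiency = max(0, (1 - (stutter_self_refresh_exit_latency + stutter_burst_time)
 *                                    * num_stutter_bursts / 16666.67) * 100)
 * e.g. if the exit latency plus burst time summed over all bursts is about
 * 1667 us per frame, the efficiency is roughly (1 - 0.1) * 100 = 90 percent
 */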
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
}
}
data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
data->total_stutter_dmif_buffer_size = 0;
data->total_bytes_requested = 0;
data->min_stutter_dmif_buffer_size = 9999;
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
}
data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
}
}
data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
data->time_in_self_refresh = data->min_stutter_refresh_duration;
if (data->d1_display_write_back_dwb_enable == 1) {
data->stutter_efficiency = bw_int_to_fixed(0);
}
else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
data->stutter_efficiency = bw_int_to_fixed(0);
}
else {
/*compute stutter efficiency assuming 60 hz refresh rate*/
data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
}
/*immediate flip time*/
/*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
/*otherwise, it may take just one urgent memory trip*/
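/*
 * comment-only sketch: for each scatter-gather pipe
 *   trips_for_pte_row   = ceil(scatter_gather_pte_requests_in_row / scatter_gather_pte_request_limit)
 *   immediate_flip_time = max(1, max over pipes of trips_for_pte_row) * total_dmifmc_urgent_latency
 */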
data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
}
}
}
data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
/*worst latency for other clients*/
/*it is the urgent latency plus the urgent burst time*/
data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
if (data->d1_display_write_back_dwb_enable == 1) {
data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
}
else {
data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
}
/*dmif mc urgent latency supported in high sclk and yclk*/
data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
/*dram speed/p-state change margin*/
/*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
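/*
 * comment-only sketch of the margins computed below (minimum over enabled surfaces):
 *   nbp_state_margin         = maximum_latency_hiding_with_cursor - nbp_state_change_watermark
 *                              + nbp_state_change_latency
 *   v_blank_nbp_state_margin = (v_total - (src_height / v_scale_ratio - 4)) * h_total / pixel_rate
 *                              - nbp_state_change_watermark + nbp_state_change_latency
 */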
data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
}
}
/*sclk required vs urgent latency*/
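/*
 * comment-only sketch: for an assumed urgent latency of i microseconds (i = 1..5)
 *   transfer_time               = min_read_buffer_size_in_time - total_dmifmc_urgent_trips * i
 *   required_sclk_for_latency_i = total_display_reads_required_data / transfer_time
 *                                 / (data_return_bus_width * percent_of_ideal_port_bw_received_after_urgent_latency / 100)
 * reported as n/a when the transfer time does not exceed the page close-open time
 */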
for (i = 1; i <= 5; i++) {
data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
}
else {
data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
}
}
/*output link bit per pixel supported*/
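/*
 * comment-only sketch: the supported output depth is link bandwidth over pixel rate, e.g.
 *   output_bppdp4_lane_hbr = 270 * 4 lanes * 8 bits / pixel_rate
 * so a 270 mhz pixel clock on 4-lane hbr supports 270 * 4 * 8 / 270 = 32 bpp,
 * while hdmi uses min(600, max_phyclk) / pixel_rate * 24
 */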
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
data->output_bpphdmi[k] = bw_def_na;
data->output_bppdp4_lane_hbr[k] = bw_def_na;
data->output_bppdp4_lane_hbr2[k] = bw_def_na;
data->output_bppdp4_lane_hbr3[k] = bw_def_na;
if (data->enable[k]) {
data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
}
if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
}
if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
}
}
}
kfree(surface_type);
free_tiling_mode:
kfree(tiling_mode);
free_sclk:
kfree(sclk);
free_yclk:
kfree(yclk);
}
/*******************************************************************************
* Public functions
******************************************************************************/
void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
struct bw_calcs_vbios *bw_vbios,
struct hw_asic_id asic_id)
{
struct bw_calcs_dceip *dceip;
struct bw_calcs_vbios *vbios;
enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
if (!dceip)
return;
vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
if (!vbios) {
kfree(dceip);
return;
}
dceip->version = version;
switch (version) {
case BW_CALCS_VERSION_CARRIZO:
vbios->memory_type = bw_def_gddr5;
vbios->dram_channel_width_in_bits = 64;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 8;
vbios->high_yclk = bw_int_to_fixed(1600);
vbios->mid_yclk = bw_int_to_fixed(1600);
vbios->low_yclk = bw_frc_to_fixed(66666, 100);
vbios->low_sclk = bw_int_to_fixed(200);
vbios->mid1_sclk = bw_int_to_fixed(300);
vbios->mid2_sclk = bw_int_to_fixed(300);
vbios->mid3_sclk = bw_int_to_fixed(300);
vbios->mid4_sclk = bw_int_to_fixed(300);
vbios->mid5_sclk = bw_int_to_fixed(300);
vbios->mid6_sclk = bw_int_to_fixed(300);
vbios->high_sclk = bw_frc_to_fixed(62609, 100);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(50);
vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = true;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
dceip->dmif_pipe_en_fbc_chunk_tracker = false;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 3;
dceip->number_of_underlay_pipes = 1;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = false;
dceip->argb_compression_support = false;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 2;
dceip->graphics_dmif_size = 12288;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = true;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(0);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
break;
case BW_CALCS_VERSION_POLARIS10:
/* TODO: Treat VEGAM the same as P10 for now
* Need to tune the parameters for VEGAM if needed */
case BW_CALCS_VERSION_VEGAM:
vbios->memory_type = bw_def_gddr5;
vbios->dram_channel_width_in_bits = 32;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 8;
vbios->high_yclk = bw_int_to_fixed(6000);
vbios->mid_yclk = bw_int_to_fixed(3200);
vbios->low_yclk = bw_int_to_fixed(1000);
vbios->low_sclk = bw_int_to_fixed(300);
vbios->mid1_sclk = bw_int_to_fixed(400);
vbios->mid2_sclk = bw_int_to_fixed(500);
vbios->mid3_sclk = bw_int_to_fixed(600);
vbios->mid4_sclk = bw_int_to_fixed(700);
vbios->mid5_sclk = bw_int_to_fixed(800);
vbios->mid6_sclk = bw_int_to_fixed(974);
vbios->high_sclk = bw_int_to_fixed(1154);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(48);
vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
vbios->nbp_state_change_latency = bw_int_to_fixed(45);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = true;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
dceip->dmif_pipe_en_fbc_chunk_tracker = false;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 6;
dceip->number_of_underlay_pipes = 0;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = false;
dceip->argb_compression_support = true;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 4;
dceip->graphics_dmif_size = 12288;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = true;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_POLARIS11:
vbios->memory_type = bw_def_gddr5;
vbios->dram_channel_width_in_bits = 32;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 8;
vbios->high_yclk = bw_int_to_fixed(6000);
vbios->mid_yclk = bw_int_to_fixed(3200);
vbios->low_yclk = bw_int_to_fixed(1000);
vbios->low_sclk = bw_int_to_fixed(300);
vbios->mid1_sclk = bw_int_to_fixed(400);
vbios->mid2_sclk = bw_int_to_fixed(500);
vbios->mid3_sclk = bw_int_to_fixed(600);
vbios->mid4_sclk = bw_int_to_fixed(700);
vbios->mid5_sclk = bw_int_to_fixed(800);
vbios->mid6_sclk = bw_int_to_fixed(974);
vbios->high_sclk = bw_int_to_fixed(1154);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(48);
if (vbios->number_of_dram_channels == 2) // 64-bit
vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
else
vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
vbios->nbp_state_change_latency = bw_int_to_fixed(45);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = true;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
dceip->dmif_pipe_en_fbc_chunk_tracker = false;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 5;
dceip->number_of_underlay_pipes = 0;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = false;
dceip->argb_compression_support = true;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 4;
dceip->graphics_dmif_size = 12288;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = true;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_POLARIS12:
vbios->memory_type = bw_def_gddr5;
vbios->dram_channel_width_in_bits = 32;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 8;
vbios->high_yclk = bw_int_to_fixed(6000);
vbios->mid_yclk = bw_int_to_fixed(3200);
vbios->low_yclk = bw_int_to_fixed(1000);
vbios->low_sclk = bw_int_to_fixed(678);
vbios->mid1_sclk = bw_int_to_fixed(864);
vbios->mid2_sclk = bw_int_to_fixed(900);
vbios->mid3_sclk = bw_int_to_fixed(920);
vbios->mid4_sclk = bw_int_to_fixed(940);
vbios->mid5_sclk = bw_int_to_fixed(960);
vbios->mid6_sclk = bw_int_to_fixed(980);
vbios->high_sclk = bw_int_to_fixed(1049);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(48);
if (vbios->number_of_dram_channels == 2) // 64-bit
vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
else
vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
vbios->nbp_state_change_latency = bw_int_to_fixed(250);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = false;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
dceip->dmif_pipe_en_fbc_chunk_tracker = false;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 5;
dceip->number_of_underlay_pipes = 0;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = true;
dceip->argb_compression_support = true;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 4;
dceip->graphics_dmif_size = 12288;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = true;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_STONEY:
vbios->memory_type = bw_def_gddr5;
vbios->dram_channel_width_in_bits = 64;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 8;
vbios->high_yclk = bw_int_to_fixed(1866);
vbios->mid_yclk = bw_int_to_fixed(1866);
vbios->low_yclk = bw_int_to_fixed(1333);
vbios->low_sclk = bw_int_to_fixed(200);
vbios->mid1_sclk = bw_int_to_fixed(600);
vbios->mid2_sclk = bw_int_to_fixed(600);
vbios->mid3_sclk = bw_int_to_fixed(600);
vbios->mid4_sclk = bw_int_to_fixed(600);
vbios->mid5_sclk = bw_int_to_fixed(600);
vbios->mid6_sclk = bw_int_to_fixed(600);
vbios->high_sclk = bw_int_to_fixed(800);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(50);
vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = true;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
dceip->dmif_pipe_en_fbc_chunk_tracker = false;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 2;
dceip->number_of_underlay_pipes = 1;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = false;
dceip->argb_compression_support = true;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 2;
dceip->graphics_dmif_size = 12288;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = true;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(0);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_VEGA10:
vbios->memory_type = bw_def_hbm;
vbios->dram_channel_width_in_bits = 128;
vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
vbios->number_of_dram_banks = 16;
vbios->high_yclk = bw_int_to_fixed(2400);
vbios->mid_yclk = bw_int_to_fixed(1700);
vbios->low_yclk = bw_int_to_fixed(1000);
vbios->low_sclk = bw_int_to_fixed(300);
vbios->mid1_sclk = bw_int_to_fixed(350);
vbios->mid2_sclk = bw_int_to_fixed(400);
vbios->mid3_sclk = bw_int_to_fixed(500);
vbios->mid4_sclk = bw_int_to_fixed(600);
vbios->mid5_sclk = bw_int_to_fixed(700);
vbios->mid6_sclk = bw_int_to_fixed(760);
vbios->high_sclk = bw_int_to_fixed(776);
vbios->low_voltage_max_dispclk = bw_int_to_fixed(460);
vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670);
vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133);
vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
vbios->data_return_bus_width = bw_int_to_fixed(32);
vbios->trc = bw_int_to_fixed(48);
vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
vbios->nbp_state_change_latency = bw_int_to_fixed(39);
vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
vbios->scatter_gather_enable = false;
vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
vbios->cursor_width = 32;
vbios->average_compression_rate = 4;
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
vbios->blackout_duration = bw_int_to_fixed(0); /* us */
vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip->large_cursor = false;
dceip->dmif_request_buffer_size = bw_int_to_fixed(2304);
dceip->dmif_pipe_en_fbc_chunk_tracker = true;
dceip->cursor_max_outstanding_group_num = 1;
dceip->lines_interleaved_into_lb = 2;
dceip->chunk_width = 256;
dceip->number_of_graphics_pipes = 6;
dceip->number_of_underlay_pipes = 0;
dceip->low_power_tiling_mode = 0;
dceip->display_write_back_supported = true;
dceip->argb_compression_support = true;
dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
dceip->max_dmif_buffer_allocated = 4;
dceip->graphics_dmif_size = 24576;
dceip->underlay_luma_dmif_size = 19456;
dceip->underlay_chroma_dmif_size = 23552;
dceip->pre_downscaler_enabled = true;
dceip->underlay_downscale_prefetch_enabled = false;
dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
dceip->cursor_chunk_width = bw_int_to_fixed(64);
dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
dceip->limit_excessive_outstanding_dmif_requests = true;
dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
dceip->request_efficiency = bw_frc_to_fixed(8, 10);
dceip->dispclk_per_request = bw_int_to_fixed(2);
dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
default:
break;
}
*bw_dceip = *dceip;
*bw_vbios = *vbios;
kfree(dceip);
kfree(vbios);
}
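/*
 * Illustrative usage (comment-only sketch, not driver code; the dc, pipes and
 * pipe_count names are assumed to come from the caller, e.g. a dce resource
 * validation path):
 *
 *	struct bw_calcs_dceip dceip;
 *	struct bw_calcs_vbios vbios;
 *	struct dce_bw_output bw_output = {0};
 *
 *	bw_calcs_init(&dceip, &vbios, dc->ctx->asic_id);
 *	if (!bw_calcs(dc->ctx, &dceip, &vbios, pipes, pipe_count, &bw_output))
 *		the configuration is rejected (not enough bandwidth);
 *	otherwise bw_output holds the watermarks and clocks for HW programming.
 */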
/*
* Compare calculated (required) clocks against the clocks available at
* maximum voltage (max Performance Level).
*/
static bool is_display_configuration_supported(
const struct bw_calcs_vbios *vbios,
const struct dce_bw_output *calcs_output)
{
uint32_t int_max_clk;
int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
int_max_clk *= 1000; /* MHz to kHz */
if (calcs_output->dispclk_khz > int_max_clk)
return false;
int_max_clk = bw_fixed_to_int(vbios->high_sclk);
int_max_clk *= 1000; /* MHz to kHz */
if (calcs_output->sclk_khz > int_max_clk)
return false;
return true;
}
static void populate_initial_data(
const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
{
int i, j;
int num_displays = 0;
data->underlay_surface_type = bw_def_420;
data->panning_and_bezel_adjustment = bw_def_none;
data->graphics_lb_bpc = 10;
data->underlay_lb_bpc = 8;
data->underlay_tiling_mode = bw_def_tiled;
data->graphics_tiling_mode = bw_def_tiled;
data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
data->increase_voltage_to_support_mclk_switch = true;
/* Pipes with underlay first */
for (i = 0; i < pipe_count; i++) {
if (!pipe[i].stream || !pipe[i].bottom_pipe)
continue;
ASSERT(pipe[i].plane_state);
if (num_displays == 0) {
if (!pipe[i].plane_state->visible)
data->d0_underlay_mode = bw_def_underlay_only;
else
data->d0_underlay_mode = bw_def_blend;
} else {
if (!pipe[i].plane_state->visible)
data->d1_underlay_mode = bw_def_underlay_only;
else
data->d1_underlay_mode = bw_def_blend;
}
data->fbc_en[num_displays + 4] = false;
data->lpt_en[num_displays + 4] = false;
data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000);
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
switch (pipe[i].plane_state->rotation) {
case ROTATION_ANGLE_0:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
break;
case ROTATION_ANGLE_90:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
break;
case ROTATION_ANGLE_180:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
break;
case ROTATION_ANGLE_270:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
break;
default:
break;
}
switch (pipe[i].plane_state->format) {
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
data->bytes_per_pixel[num_displays + 4] = 2;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
data->bytes_per_pixel[num_displays + 4] = 4;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
data->bytes_per_pixel[num_displays + 4] = 8;
break;
default:
data->bytes_per_pixel[num_displays + 4] = 4;
break;
}
data->interlace_mode[num_displays + 4] = false;
data->stereo_mode[num_displays + 4] = bw_def_mono;
for (j = 0; j < 2; j++) {
data->fbc_en[num_displays * 2 + j] = false;
data->lpt_en[num_displays * 2 + j] = false;
data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch);
data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value);
data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value);
switch (pipe[i].bottom_pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
break;
case ROTATION_ANGLE_90:
data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
break;
case ROTATION_ANGLE_180:
data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
break;
case ROTATION_ANGLE_270:
data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
break;
default:
break;
}
data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
}
num_displays++;
}
/* Pipes without underlay after */
for (i = 0; i < pipe_count; i++) {
unsigned int pixel_clock_100hz;
if (!pipe[i].stream || pipe[i].bottom_pipe)
continue;
data->fbc_en[num_displays + 4] = false;
data->lpt_en[num_displays + 4] = false;
data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz;
if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
pixel_clock_100hz *= 2;
data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000);
if (pipe[i].plane_state) {
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
switch (pipe[i].plane_state->rotation) {
case ROTATION_ANGLE_0:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
break;
case ROTATION_ANGLE_90:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
break;
case ROTATION_ANGLE_180:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
break;
case ROTATION_ANGLE_270:
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
break;
default:
break;
}
switch (pipe[i].plane_state->format) {
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
data->bytes_per_pixel[num_displays + 4] = 2;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
data->bytes_per_pixel[num_displays + 4] = 4;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
data->bytes_per_pixel[num_displays + 4] = 8;
break;
default:
data->bytes_per_pixel[num_displays + 4] = 4;
break;
}
} else if (pipe[i].stream->dst.width != 0 &&
pipe[i].stream->dst.height != 0 &&
pipe[i].stream->src.width != 0 &&
pipe[i].stream->src.height != 0) {
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width);
data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height);
data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
data->bytes_per_pixel[num_displays + 4] = 4;
} else {
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
data->bytes_per_pixel[num_displays + 4] = 4;
}
data->interlace_mode[num_displays + 4] = false;
data->stereo_mode[num_displays + 4] = bw_def_mono;
num_displays++;
}
data->number_of_displays = num_displays;
}
static bool all_displays_in_sync(const struct pipe_ctx pipe[],
int pipe_count)
{
const struct pipe_ctx *active_pipes[MAX_PIPES];
int i, num_active_pipes = 0;
for (i = 0; i < pipe_count; i++) {
if (!resource_is_pipe_type(&pipe[i], OPP_HEAD))
continue;
active_pipes[num_active_pipes++] = &pipe[i];
}
if (!num_active_pipes)
return false;
for (i = 1; i < num_active_pipes; ++i) {
if (!resource_are_streams_timing_synchronizable(
active_pipes[0]->stream, active_pipes[i]->stream)) {
return false;
}
}
return true;
}
/*
* Return:
* true - Display(s) configuration supported.
* In this case 'calcs_output' contains data for HW programming
* false - Display(s) configuration not supported (not enough bandwidth).
*/
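/*
 * Watermark sets A..D are produced by re-running calculate_bandwidth()
 * with the vbios clock table temporarily overridden: set A uses the
 * table as-is; set B raises the low/mid1/mid2 sclk floors to mid3_sclk;
 * set C restores those floors and raises low_yclk to mid_yclk; set D
 * pins yclk and sclk to their maximums on Carrizo or to mid levels on
 * other ASICs (Carrizo skips sets B and C). The original vbios values
 * are restored before returning. Watermark slots 4..9 carry the
 * per-display results, while the low slots are used for underlay/slave
 * planes, which is why the copies below index from 4.
 */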
bool bw_calcs(struct dc_context *ctx,
const struct bw_calcs_dceip *dceip,
const struct bw_calcs_vbios *vbios,
const struct pipe_ctx pipe[],
int pipe_count,
struct dce_bw_output *calcs_output)
{
struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
GFP_KERNEL);
if (!data)
return false;
populate_initial_data(pipe, pipe_count, data);
if (ctx->dc->config.multi_mon_pp_mclk_switch)
calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
else
calcs_output->all_displays_in_sync = false;
if (data->number_of_displays != 0) {
uint8_t yclk_lvl;
struct bw_fixed high_sclk = vbios->high_sclk;
struct bw_fixed mid1_sclk = vbios->mid1_sclk;
struct bw_fixed mid2_sclk = vbios->mid2_sclk;
struct bw_fixed mid3_sclk = vbios->mid3_sclk;
struct bw_fixed mid4_sclk = vbios->mid4_sclk;
struct bw_fixed mid5_sclk = vbios->mid5_sclk;
struct bw_fixed mid6_sclk = vbios->mid6_sclk;
struct bw_fixed low_sclk = vbios->low_sclk;
struct bw_fixed high_yclk = vbios->high_yclk;
struct bw_fixed mid_yclk = vbios->mid_yclk;
struct bw_fixed low_yclk = vbios->low_yclk;
if (ctx->dc->debug.bandwidth_calcs_trace) {
print_bw_calcs_dceip(ctx, dceip);
print_bw_calcs_vbios(ctx, vbios);
print_bw_calcs_data(ctx, data);
}
calculate_bandwidth(dceip, vbios, data);
yclk_lvl = data->y_clk_level;
calcs_output->nbp_state_change_enable =
data->nbp_state_change_enable;
calcs_output->cpuc_state_change_enable =
data->cpuc_state_change_enable;
calcs_output->cpup_state_change_enable =
data->cpup_state_change_enable;
calcs_output->stutter_mode_enable =
data->stutter_mode_enable;
calcs_output->dispclk_khz =
bw_fixed_to_int(bw_mul(data->dispclk,
bw_int_to_fixed(1000)));
calcs_output->blackout_recovery_time_us =
bw_fixed_to_int(data->blackout_recovery_time);
calcs_output->sclk_khz =
bw_fixed_to_int(bw_mul(data->required_sclk,
bw_int_to_fixed(1000)));
calcs_output->sclk_deep_sleep_khz =
bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
bw_int_to_fixed(1000)));
if (yclk_lvl == 0)
calcs_output->yclk_khz = bw_fixed_to_int(
bw_mul(low_yclk, bw_int_to_fixed(1000)));
else if (yclk_lvl == 1)
calcs_output->yclk_khz = bw_fixed_to_int(
bw_mul(mid_yclk, bw_int_to_fixed(1000)));
else
calcs_output->yclk_khz = bw_fixed_to_int(
bw_mul(high_yclk, bw_int_to_fixed(1000)));
/* units: nanosecond, 16bit storage. */
calcs_output->nbp_state_change_wm_ns[0].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[1].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[2].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->nbp_state_change_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->nbp_state_change_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->nbp_state_change_wm_ns[5].a_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[0].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[1].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[2].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_exit_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_exit_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_exit_wm_ns[5].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[0].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[1].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[2].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_entry_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_entry_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_entry_wm_ns[5].a_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[4], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[1].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[5], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[2].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->urgent_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[0], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->urgent_wm_ns[3].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[7], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->urgent_wm_ns[5].a_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[9], bw_int_to_fixed(1000)));
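/*
 * Sets B and C (non-Carrizo only): re-run the calculation with the
 * low/mid1/mid2 sclk floors raised to mid3_sclk for the b_mark values,
 * then with the sclk floors restored and low_yclk raised to mid_yclk
 * for the c_mark values.
 */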
if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
calculate_bandwidth(dceip, vbios, data);
calcs_output->nbp_state_change_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->nbp_state_change_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->nbp_state_change_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->nbp_state_change_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_exit_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_exit_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_exit_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_entry_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_entry_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_entry_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[4], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[5], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->urgent_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[0], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->urgent_wm_ns[3].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[7], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->urgent_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[9], bw_int_to_fixed(1000)));
((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
calculate_bandwidth(dceip, vbios, data);
calcs_output->nbp_state_change_wm_ns[0].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[1].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[2].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->nbp_state_change_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->nbp_state_change_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->nbp_state_change_wm_ns[5].c_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[0].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[1].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[2].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_exit_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_exit_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_exit_wm_ns[5].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[0].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[1].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[2].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_entry_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[0],
bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[1],
bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_entry_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[7],
bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[8],
bw_int_to_fixed(1000)));
}
calcs_output->stutter_entry_wm_ns[5].c_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[4], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[1].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[5], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[2].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->urgent_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[0], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->urgent_wm_ns[3].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[7], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->urgent_wm_ns[5].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[9], bw_int_to_fixed(1000)));
}
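/*
 * Set D: Carrizo pins both yclk levels and every sclk level to their
 * maximums, while other ASICs reuse the mid-level floors, before the
 * final calculate_bandwidth() pass below.
 */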
if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
} else {
((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
}
calculate_bandwidth(dceip, vbios, data);
calcs_output->nbp_state_change_wm_ns[0].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[1].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[2].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->nbp_state_change_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->nbp_state_change_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
calcs_output->nbp_state_change_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->nbp_state_change_wm_ns[5].d_mark =
bw_fixed_to_int(bw_mul(data->
nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[0].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[1].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[2].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_exit_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_exit_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_exit_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_exit_wm_ns[5].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[0].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[4], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[1].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[5], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[2].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->stutter_entry_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[0], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->stutter_entry_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[7], bw_int_to_fixed(1000)));
calcs_output->stutter_entry_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->stutter_entry_wm_ns[5].d_mark =
bw_fixed_to_int(bw_mul(data->
stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[4], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[1].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[5], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[2].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[6], bw_int_to_fixed(1000)));
if (ctx->dc->caps.max_slave_planes) {
calcs_output->urgent_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[0], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[1], bw_int_to_fixed(1000)));
} else {
calcs_output->urgent_wm_ns[3].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[7], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[4].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[8], bw_int_to_fixed(1000)));
}
calcs_output->urgent_wm_ns[5].d_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[9], bw_int_to_fixed(1000)));
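/*
 * Put back the clock levels that were temporarily overridden above;
 * vbios is nominally const, hence the casts.
 */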
((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
} else {
calcs_output->nbp_state_change_enable = true;
calcs_output->cpuc_state_change_enable = true;
calcs_output->cpup_state_change_enable = true;
calcs_output->stutter_mode_enable = true;
calcs_output->dispclk_khz = 0;
calcs_output->sclk_khz = 0;
}
kfree(data);
return is_display_configuration_supported(vbios, calcs_output);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "custom_float.h"
static bool build_custom_float(
struct fixed31_32 value,
const struct custom_float_format *format,
bool *negative,
uint32_t *mantissa,
uint32_t *exponenta)
{
uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
const struct fixed31_32 mantissa_constant_plus_max_fraction =
dc_fixpt_from_fraction(
(1LL << (format->mantissa_bits + 1)) - 1,
1LL << format->mantissa_bits);
struct fixed31_32 mantiss;
if (dc_fixpt_eq(
value,
dc_fixpt_zero)) {
*negative = false;
*mantissa = 0;
*exponenta = 0;
return true;
}
if (dc_fixpt_lt(
value,
dc_fixpt_zero)) {
*negative = format->sign;
value = dc_fixpt_neg(value);
} else {
*negative = false;
}
if (dc_fixpt_lt(
value,
dc_fixpt_one)) {
uint32_t i = 1;
do {
value = dc_fixpt_shl(value, 1);
++i;
} while (dc_fixpt_lt(
value,
dc_fixpt_one));
--i;
if (exp_offset <= i) {
*mantissa = 0;
*exponenta = 0;
return true;
}
*exponenta = exp_offset - i;
} else if (dc_fixpt_le(
mantissa_constant_plus_max_fraction,
value)) {
uint32_t i = 1;
do {
value = dc_fixpt_shr(value, 1);
++i;
} while (dc_fixpt_lt(
mantissa_constant_plus_max_fraction,
value));
*exponenta = exp_offset + i - 1;
} else {
*exponenta = exp_offset;
}
mantiss = dc_fixpt_sub(
value,
dc_fixpt_one);
if (dc_fixpt_lt(
mantiss,
dc_fixpt_zero) ||
dc_fixpt_lt(
dc_fixpt_one,
mantiss))
mantiss = dc_fixpt_zero;
else
mantiss = dc_fixpt_shl(
mantiss,
format->mantissa_bits);
*mantissa = dc_fixpt_floor(mantiss);
return true;
}
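/*
 * Pack the pieces produced by build_custom_float() into a single register
 * value: mantissa in the low bits, exponent above it and, when the format
 * has one, the sign in the top bit.
 */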
static bool setup_custom_float(
const struct custom_float_format *format,
bool negative,
uint32_t mantissa,
uint32_t exponenta,
uint32_t *result)
{
uint32_t i = 0;
uint32_t j = 0;
uint32_t value = 0;
/* verification code:
* once calculation is ok we can remove it
*/
const uint32_t mantissa_mask =
(1 << (format->mantissa_bits + 1)) - 1;
const uint32_t exponenta_mask =
(1 << (format->exponenta_bits + 1)) - 1;
if (mantissa & ~mantissa_mask) {
BREAK_TO_DEBUGGER();
mantissa = mantissa_mask;
}
if (exponenta & ~exponenta_mask) {
BREAK_TO_DEBUGGER();
exponenta = exponenta_mask;
}
/* end of verification code */
while (i < format->mantissa_bits) {
uint32_t mask = 1 << i;
if (mantissa & mask)
value |= mask;
++i;
}
while (j < format->exponenta_bits) {
uint32_t mask = 1 << j;
if (exponenta & mask)
value |= mask << i;
++j;
}
if (negative && format->sign)
value |= 1 << (i + j);
*result = value;
return true;
}
bool convert_to_custom_float_format(
struct fixed31_32 value,
const struct custom_float_format *format,
uint32_t *result)
{
uint32_t mantissa;
uint32_t exponenta;
bool negative;
return build_custom_float(
value, format, &negative, &mantissa, &exponenta) &&
setup_custom_float(
format, negative, mantissa, exponenta, result);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "bw_fixed.h"
#define MAX_I64 \
((int64_t)((1ULL << 63) - 1))
#define MIN_I64 \
(-MAX_I64 - 1)
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
#define GET_FRACTIONAL_PART(x) \
(FRACTIONAL_PART_MASK & (x))
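/*
 * A bw_fixed value is a signed 64-bit integer scaled by
 * 2^BW_FIXED_BITS_PER_FRACTIONAL_PART: the upper bits hold the integer
 * part and GET_FRACTIONAL_PART() extracts the fraction.
 */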
static uint64_t abs_i64(int64_t arg)
{
if (arg >= 0)
return (uint64_t)(arg);
else
return (uint64_t)(-arg);
}
struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
{
struct bw_fixed res;
ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
return res;
}
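/*
 * Fixed-point division: the sign is handled separately, the integer part
 * comes from div64_u64_rem() and the fractional bits are generated one at
 * a time by binary long division on the remainder, with the result
 * rounded in the last bit.
 */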
struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
{
struct bw_fixed res;
bool arg1_negative = numerator < 0;
bool arg2_negative = denominator < 0;
uint64_t arg1_value;
uint64_t arg2_value;
uint64_t remainder;
/* determine integer part */
uint64_t res_value;
ASSERT(denominator != 0);
arg1_value = abs_i64(numerator);
arg2_value = abs_i64(denominator);
res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
ASSERT(res_value <= BW_FIXED_MAX_I32);
/* determine fractional part */
{
uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
do {
remainder <<= 1;
res_value <<= 1;
if (remainder >= arg2_value) {
res_value |= 1;
remainder -= arg2_value;
}
} while (--i != 0);
}
/* round up LSB */
{
uint64_t summand = (remainder << 1) >= arg2_value;
ASSERT(res_value <= MAX_I64 - summand);
res_value += summand;
}
res.value = (int64_t)(res_value);
if (arg1_negative ^ arg2_negative)
res.value = -res.value;
return res;
}
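/*
 * bw_floor2()/bw_ceil2() round arg to a multiple of |significance|:
 * floor2 truncates toward zero, ceil2 bumps the truncated value one step
 * away from zero whenever the truncation dropped anything.
 */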
struct bw_fixed bw_floor2(
const struct bw_fixed arg,
const struct bw_fixed significance)
{
struct bw_fixed result;
int64_t multiplicand;
multiplicand = div64_s64(arg.value, abs_i64(significance.value));
result.value = abs_i64(significance.value) * multiplicand;
ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
return result;
}
struct bw_fixed bw_ceil2(
const struct bw_fixed arg,
const struct bw_fixed significance)
{
struct bw_fixed result;
int64_t multiplicand;
multiplicand = div64_s64(arg.value, abs_i64(significance.value));
result.value = abs_i64(significance.value) * multiplicand;
if (abs_i64(result.value) < abs_i64(arg.value)) {
if (arg.value < 0)
result.value -= abs_i64(significance.value);
else
result.value += abs_i64(significance.value);
}
return result;
}
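/*
 * Fixed-point multiply via four partial products:
 * (i1 + f1) * (i2 + f2) = i1*i2 + i1*f2 + i2*f1 + f1*f2.
 * Only the fraction*fraction term has to be shifted back down; it gets a
 * rounding adjustment before being accumulated.
 */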
struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
{
struct bw_fixed res;
bool arg1_negative = arg1.value < 0;
bool arg2_negative = arg2.value < 0;
uint64_t arg1_value = abs_i64(arg1.value);
uint64_t arg2_value = abs_i64(arg2.value);
uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value);
uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value);
uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
uint64_t tmp;
res.value = arg1_int * arg2_int;
ASSERT(res.value <= BW_FIXED_MAX_I32);
res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART;
tmp = arg1_int * arg2_fra;
ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
res.value += tmp;
tmp = arg2_int * arg1_fra;
ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
res.value += tmp;
tmp = arg1_fra * arg2_fra;
tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) +
(tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
res.value += tmp;
if (arg1_negative ^ arg2_negative)
res.value = -res.value;
return res;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
* Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn_calcs.h"
#include "dcn_calc_auto.h"
#include "dal_asic_id.h"
#include "resource.h"
#include "dcn10/dcn10_resource.h"
#include "dcn10/dcn10_hubbub.h"
#include "dml/dml1_display_rq_dlg_calc.h"
#include "dcn_calc_math.h"
#define DC_LOGGER \
dc->ctx->logger
#define WM_SET_COUNT 4
#define WM_A 0
#define WM_B 1
#define WM_C 2
#define WM_D 3
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
/* Defaults from spreadsheet rev#247.
* RV2 delta: dram_clock_change_latency, max_num_dpp
*/
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
.sr_exit_time = 17, /*us*/
.sr_enter_plus_exit_time = 19, /*us*/
.urgent_latency = 4, /*us*/
.dram_clock_change_latency = 17, /*us*/
.write_back_latency = 12, /*us*/
.percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/
/* below default clocks are derived from the STA target, based on the
 * slow-slow corner + 10% margin, with voltages aligned to FCLK.
 *
 * Use these values if the fused values don't make sense, as earlier
 * parts don't have correct values fused */
/* default DCF CLK DPM on RV */
.dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */
.dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */
.dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */
.dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */
/* default DISP CLK voltage state on RV */
.max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */
.max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */
.max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */
.max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */
/* default DPP CLK voltage state on RV */
.max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */
.max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */
.max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */
.max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */
/* default PHY CLK voltage state on RV */
.phyclkv_max0p9 = 900, /*MHz*/
.phyclkv_nom0p8 = 847, /*MHz*/
.phyclkv_mid0p72 = 800, /*MHz*/
.phyclkv_min0p65 = 600, /*MHz*/
/* BW depend on FCLK, MCLK, # of channels */
/* dual channel BW */
.fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/
.fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/
.fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/
.fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/
/* single channel BW
.fabric_and_dram_bandwidth_vmax0p9 = 19.2f,
.fabric_and_dram_bandwidth_vnom0p8 = 17.066f,
.fabric_and_dram_bandwidth_vmid0p72 = 14.933f,
.fabric_and_dram_bandwidth_vmin0p65 = 12.8f,
*/
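/* 38.4 GB/s matches two 64-bit channels at 2400 MT/s (2 * 8 B * 2400 MT/s);
 * the single channel table above is exactly half */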
.number_of_channels = 2,
.socclk = 208, /*MHz*/
.downspreading = 0.5f, /*%*/
.round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/
.urgent_out_of_order_return_per_channel = 256, /*bytes*/
.vmm_page_size = 4096, /*bytes*/
.return_bus_width = 64, /*bytes*/
.max_request_size = 256, /*bytes*/
/* Depends on user class (client vs embedded, workstation, etc) */
.percent_disp_bw_limit = 0.3f /*%*/
};
const struct dcn_ip_params dcn10_ip_defaults = {
.rob_buffer_size_in_kbyte = 64,
.det_buffer_size_in_kbyte = 164,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_in_kbyte = 8,
.pte_enable = dcn_bw_yes,
.pte_chunk_size = 2, /*kbytes*/
.meta_chunk_size = 2, /*kbytes*/
.writeback_chunk_size = 2, /*kbytes*/
.odm_capability = dcn_bw_no,
.dsc_capability = dcn_bw_no,
.line_buffer_size = 589824, /*bit*/
.max_line_buffer_lines = 12,
.is_line_buffer_bpp_fixed = dcn_bw_no,
.line_buffer_fixed_bpp = dcn_bw_na,
.writeback_luma_buffer_size = 12, /*kbytes*/
.writeback_chroma_buffer_size = 8, /*kbytes*/
.max_num_dpp = 4,
.max_num_writeback = 2,
.max_dchub_topscl_throughput = 4, /*pixels/dppclk*/
.max_pscl_tolb_throughput = 2, /*pixels/dppclk*/
.max_lb_tovscl_throughput = 4, /*pixels/dppclk*/
.max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.pte_buffer_size_in_requests = 42,
.dispclk_ramping_margin = 1, /*%*/
.under_scan_factor = 1.11f,
.max_inter_dcn_tile_repeaters = 8,
.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no,
.bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no,
.dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/
};
static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode)
{
switch (sw_mode) {
case DC_SW_LINEAR:
return dcn_bw_sw_linear;
case DC_SW_4KB_S:
return dcn_bw_sw_4_kb_s;
case DC_SW_4KB_D:
return dcn_bw_sw_4_kb_d;
case DC_SW_64KB_S:
return dcn_bw_sw_64_kb_s;
case DC_SW_64KB_D:
return dcn_bw_sw_64_kb_d;
case DC_SW_VAR_S:
return dcn_bw_sw_var_s;
case DC_SW_VAR_D:
return dcn_bw_sw_var_d;
case DC_SW_64KB_S_T:
return dcn_bw_sw_64_kb_s_t;
case DC_SW_64KB_D_T:
return dcn_bw_sw_64_kb_d_t;
case DC_SW_4KB_S_X:
return dcn_bw_sw_4_kb_s_x;
case DC_SW_4KB_D_X:
return dcn_bw_sw_4_kb_d_x;
case DC_SW_64KB_S_X:
return dcn_bw_sw_64_kb_s_x;
case DC_SW_64KB_D_X:
return dcn_bw_sw_64_kb_d_x;
case DC_SW_VAR_S_X:
return dcn_bw_sw_var_s_x;
case DC_SW_VAR_D_X:
return dcn_bw_sw_var_d_x;
case DC_SW_256B_S:
case DC_SW_256_D:
case DC_SW_256_R:
case DC_SW_4KB_R:
case DC_SW_64KB_R:
case DC_SW_VAR_R:
case DC_SW_4KB_R_X:
case DC_SW_64KB_R_X:
case DC_SW_VAR_R_X:
default:
BREAK_TO_DEBUGGER(); /*not in formula*/
return dcn_bw_sw_4_kb_s;
}
}
static int tl_lb_bpp_to_int(enum lb_pixel_depth depth)
{
switch (depth) {
case LB_PIXEL_DEPTH_18BPP:
return 18;
case LB_PIXEL_DEPTH_24BPP:
return 24;
case LB_PIXEL_DEPTH_30BPP:
return 30;
case LB_PIXEL_DEPTH_36BPP:
return 36;
default:
return 30;
}
}
static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format)
{
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
return dcn_bw_rgb_sub_16;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
return dcn_bw_rgb_sub_32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
return dcn_bw_rgb_sub_64;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
return dcn_bw_yuv420_sub_8;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
return dcn_bw_yuv420_sub_10;
default:
return dcn_bw_rgb_sub_32;
}
}
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
{
switch (sw_mode) {
/* for 4/8/16 high tiles */
case DC_SW_LINEAR:
return dm_4k_tile;
case DC_SW_4KB_S:
case DC_SW_4KB_S_X:
return dm_4k_tile;
case DC_SW_64KB_S:
case DC_SW_64KB_S_X:
case DC_SW_64KB_S_T:
return dm_64k_tile;
case DC_SW_VAR_S:
case DC_SW_VAR_S_X:
return dm_256k_tile;
/* For 64bpp 2 high tiles */
case DC_SW_4KB_D:
case DC_SW_4KB_D_X:
return dm_4k_tile;
case DC_SW_64KB_D:
case DC_SW_64KB_D_X:
case DC_SW_64KB_D_T:
return dm_64k_tile;
case DC_SW_VAR_D:
case DC_SW_VAR_D_X:
return dm_256k_tile;
case DC_SW_4KB_R:
case DC_SW_4KB_R_X:
return dm_4k_tile;
case DC_SW_64KB_R:
case DC_SW_64KB_R_X:
return dm_64k_tile;
case DC_SW_VAR_R:
case DC_SW_VAR_R_X:
return dm_256k_tile;
/* Unsupported swizzle modes for dcn */
case DC_SW_256B_S:
default:
ASSERT(0); /* Not supported */
return 0;
}
}
static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)
{
input->src.is_hsplit = false;
/* stereo can never be split */
if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
/* reset the split group if it was already considered split. */
input->src.hsplit_grp = pipe->pipe_idx;
} else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
} else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
}
if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
/*
 * this method requires us to always re-calculate watermarks when dcc
 * changes between flips.
 */
input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
} else {
/*
 * allows us to disable dcc on the fly without re-calculating WM
 *
 * extra overhead for DCC is quite small: for 1080p the WM without
 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
 */
unsigned int bpe;
input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
}
input->src.dcc_rate = 1;
input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch;
input->src.source_scan = dm_horz;
input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle;
input->src.viewport_width = pipe->plane_res.scl_data.viewport.width;
input->src.viewport_height = pipe->plane_res.scl_data.viewport.height;
input->src.data_pitch = pipe->plane_res.scl_data.viewport.width;
input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width;
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not currently stored */
input->src.cur0_bpp = 32;
input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);
switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
case ROTATION_ANGLE_180:
input->src.source_scan = dm_horz;
break;
case ROTATION_ANGLE_90:
case ROTATION_ANGLE_270:
input->src.source_scan = dm_vert;
break;
default:
ASSERT(0); /* Not supported */
break;
}
/* TODO: Fix pixel format mappings */
switch (pipe->plane_state->format) {
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
input->src.source_format = dm_420_8;
input->src.viewport_width_c = input->src.viewport_width / 2;
input->src.viewport_height_c = input->src.viewport_height / 2;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
input->src.source_format = dm_420_10;
input->src.viewport_width_c = input->src.viewport_width / 2;
input->src.viewport_height_c = input->src.viewport_height / 2;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
input->src.source_format = dm_444_64;
input->src.viewport_width_c = input->src.viewport_width;
input->src.viewport_height_c = input->src.viewport_height;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
input->src.source_format = dm_rgbe_alpha;
input->src.viewport_width_c = input->src.viewport_width;
input->src.viewport_height_c = input->src.viewport_height;
break;
default:
input->src.source_format = dm_444_32;
input->src.viewport_width_c = input->src.viewport_width;
input->src.viewport_height_c = input->src.viewport_height;
break;
}
input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps;
input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0;
input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0;
input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0;
if (input->scale_ratio_depth.vinit < 1.0)
input->scale_ratio_depth.vinit = 1;
input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps;
input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c;
input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c;
input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0;
input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0;
input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0;
if (input->scale_ratio_depth.vinit_c < 1.0)
input->scale_ratio_depth.vinit_c = 1;
switch (pipe->plane_res.scl_data.lb_params.depth) {
case LB_PIXEL_DEPTH_30BPP:
input->scale_ratio_depth.lb_depth = 30; break;
case LB_PIXEL_DEPTH_36BPP:
input->scale_ratio_depth.lb_depth = 36; break;
default:
input->scale_ratio_depth.lb_depth = 24; break;
}
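/*
 * Destination timing as DML expects it: hblank_start is counted back from
 * htotal by the front porch and hblank_end back from hblank_start by the
 * addressable width plus borders; the vertical fields follow the same
 * scheme.
 */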
input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top
+ pipe->stream->timing.v_border_bottom;
input->dest.recout_width = pipe->plane_res.scl_data.recout.width;
input->dest.recout_height = pipe->plane_res.scl_data.recout.height;
input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width;
input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height;
input->dest.htotal = pipe->stream->timing.h_total;
input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch;
input->dest.hblank_end = input->dest.hblank_start
- pipe->stream->timing.h_addressable
- pipe->stream->timing.h_border_left
- pipe->stream->timing.h_border_right;
input->dest.vtotal = pipe->stream->timing.v_total;
input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch;
input->dest.vblank_end = input->dest.vblank_start
- pipe->stream->timing.v_addressable
- pipe->stream->timing.v_border_bottom
- pipe->stream->timing.v_border_top;
input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0;
input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width;
}
static void dcn_bw_calc_rq_dlg_ttu(
const struct dc *dc,
const struct dcn_bw_internal_vars *v,
struct pipe_ctx *pipe,
int in_idx)
{
struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml);
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
float total_active_bw = 0;
float total_prefetch_bw = 0;
int total_flip_bytes = 0;
int i;
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
memset(rq_regs, 0, sizeof(*rq_regs));
memset(rq_param, 0, sizeof(*rq_param));
memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
memset(input, 0, sizeof(*input));
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
total_flip_bytes += v->total_immediate_flip_bytes[i];
}
dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
if (dlg_sys_param->total_flip_bw < 0.0)
dlg_sys_param->total_flip_bw = 0;
dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
dlg_sys_param->t_extra_us = v->urgent_extra_latency;
dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
dlg_sys_param->total_flip_bytes = total_flip_bytes;
pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
input->clks_cfg.dcfclk_mhz = v->dcfclk;
input->clks_cfg.dispclk_mhz = v->dispclk;
input->clks_cfg.dppclk_mhz = v->dppclk;
input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input->clks_cfg.socclk_mhz = v->socclk;
input->clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
dml1_rq_dlg_get_dlg_params(
dml,
dlg_regs,
ttu_regs,
&rq_param->dlg,
dlg_sys_param,
input,
true,
true,
v->pte_enable == dcn_bw_yes,
pipe->plane_state->flip_immediate);
}
static void split_stream_across_pipes(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe)
{
int pipe_idx = secondary_pipe->pipe_idx;
if (!primary_pipe->plane_state)
return;
*secondary_pipe = *primary_pipe;
secondary_pipe->pipe_idx = pipe_idx;
secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
if (primary_pipe->bottom_pipe) {
ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
}
primary_pipe->bottom_pipe = secondary_pipe;
secondary_pipe->top_pipe = primary_pipe;
resource_build_scaling_params(primary_pipe);
resource_build_scaling_params(secondary_pipe);
}
#if 0
static void calc_wm_sets_and_perf_params(
struct dc_state *context,
struct dcn_bw_internal_vars *v)
{
/* Calculate set A last to keep internal var state consistent for required config */
if (v->voltage_level < 2) {
v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8;
v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8;
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9;
v->dcfclk_per_state[2] = v->dcfclkv_max0p9;
v->dcfclk_per_state[1] = v->dcfclkv_max0p9;
v->dcfclk_per_state[0] = v->dcfclkv_max0p9;
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
}
if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000)
!= dc->debug.sr_enter_plus_exit_time_ns
&& dc->debug.sr_enter_plus_exit_time_ns) {
updated = true;
dc->dcn_soc->sr_enter_plus_exit_time =
dc->debug.sr_enter_plus_exit_time_ns / 1000.0;
}
if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns
&& dc->debug.urgent_latency_ns) {
updated = true;
dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0;
}
if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000)
!= dc->debug.percent_of_ideal_drambw
&& dc->debug.percent_of_ideal_drambw) {
updated = true;
dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency =
dc->debug.percent_of_ideal_drambw;
}
if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
updated = true;
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
return updated;
}
static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
{
/*
 * disable optional pipe split by lowering the dispclk bounding box
 * at DPM0
 */
v->max_dispclk[0] = v->max_dppclk_vmin0p65;
}
static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
unsigned int pixel_rate_100hz)
{
float pixel_rate_mhz = pixel_rate_100hz / 10000;
/*
 * force pipe split by lowering the dpp clock for DPM0 to just
 * below the specified pixel_rate, so the bw calc will split the pipe.
 */
if (pixel_rate_mhz < v->max_dppclk[0])
v->max_dppclk[0] = pixel_rate_mhz;
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug_options *dbg,
struct dc_state *context)
{
int i;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/**
* Workaround for avoiding pipe-split in cases where we'd split
* planes that are too small, resulting in splits that aren't
* valid for the scaler.
*/
if (pipe->plane_state &&
(pipe->plane_state->dst_rect.width <= 16 ||
pipe->plane_state->dst_rect.height <= 16 ||
pipe->plane_state->src_rect.width <= 16 ||
pipe->plane_state->src_rect.height <= 16)) {
hack_disable_optional_pipe_split(v);
return;
}
}
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
context->stream_count >= 2)
hack_disable_optional_pipe_split(v);
if (context->stream_count == 1 &&
dbg->force_single_disp_pipe_split)
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
if (is_vmin_only_asic)
return 0;
else /* we are ok with all levels */
return 4;
}
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
/*
* we want a breakdown of the various stages of validation, which the
* perf_trace macro doesn't support
*/
BW_VAL_TRACE_SETUP();
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx, k;
int vesa_sync_start, asic_blank_end, asic_blank_start;
bool bw_limit_pass;
float bw_limit;
PERFORMANCE_TRACE_START();
BW_VAL_TRACE_COUNT();
if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
v->urgent_latency = dc->dcn_soc->urgent_latency;
v->write_back_latency = dc->dcn_soc->write_back_latency;
v->percent_of_ideal_drambw_received_after_urg_latency =
dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65;
v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72;
v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8;
v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9;
v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65;
v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72;
v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8;
v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9;
v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65;
v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72;
v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8;
v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9;
v->socclk = dc->dcn_soc->socclk;
v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65;
v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72;
v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8;
v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9;
v->downspreading = dc->dcn_soc->downspreading;
v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles;
v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel;
v->number_of_channels = dc->dcn_soc->number_of_channels;
v->vmm_page_size = dc->dcn_soc->vmm_page_size;
v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency;
v->return_bus_width = dc->dcn_soc->return_bus_width;
v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte;
v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte;
v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte;
v->pte_enable = dc->dcn_ip->pte_enable;
v->pte_chunk_size = dc->dcn_ip->pte_chunk_size;
v->meta_chunk_size = dc->dcn_ip->meta_chunk_size;
v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size;
v->odm_capability = dc->dcn_ip->odm_capability;
v->dsc_capability = dc->dcn_ip->dsc_capability;
v->line_buffer_size = dc->dcn_ip->line_buffer_size;
v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed;
v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp;
v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size;
v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size;
v->max_num_dpp = dc->dcn_ip->max_num_dpp;
v->max_num_writeback = dc->dcn_ip->max_num_writeback;
v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput;
v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput;
v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput;
v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput;
v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
v->max_hscl_taps = dc->dcn_ip->max_hscl_taps;
v->max_vscl_taps = dc->dcn_ip->max_vscl_taps;
v->under_scan_factor = dc->dcn_ip->under_scan_factor;
v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests;
v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin;
v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
v->bug_forcing_luma_and_chroma_request_to_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed;
v->voltage[5] = dcn_bw_no_support;
v->voltage[4] = dcn_bw_v_max0p9;
v->voltage[3] = dcn_bw_v_max0p9;
v->voltage[2] = dcn_bw_v_nom0p8;
v->voltage[1] = dcn_bw_v_mid0p72;
v->voltage[0] = dcn_bw_v_min0p65;
v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9;
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
v->dcfclk_per_state[5] = v->dcfclkv_max0p9;
v->dcfclk_per_state[4] = v->dcfclkv_max0p9;
v->dcfclk_per_state[3] = v->dcfclkv_max0p9;
v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
v->max_dispclk[5] = v->max_dispclk_vmax0p9;
v->max_dispclk[4] = v->max_dispclk_vmax0p9;
v->max_dispclk[3] = v->max_dispclk_vmax0p9;
v->max_dispclk[2] = v->max_dispclk_vnom0p8;
v->max_dispclk[1] = v->max_dispclk_vmid0p72;
v->max_dispclk[0] = v->max_dispclk_vmin0p65;
v->max_dppclk[5] = v->max_dppclk_vmax0p9;
v->max_dppclk[4] = v->max_dppclk_vmax0p9;
v->max_dppclk[3] = v->max_dppclk_vmax0p9;
v->max_dppclk[2] = v->max_dppclk_vnom0p8;
v->max_dppclk[1] = v->max_dppclk_vmid0p72;
v->max_dppclk[0] = v->max_dppclk_vmin0p65;
v->phyclk_per_state[5] = v->phyclkv_max0p9;
v->phyclk_per_state[4] = v->phyclkv_max0p9;
v->phyclk_per_state[3] = v->phyclkv_max0p9;
v->phyclk_per_state[2] = v->phyclkv_nom0p8;
v->phyclk_per_state[1] = v->phyclkv_mid0p72;
v->phyclk_per_state[0] = v->phyclkv_min0p65;
v->synchronized_vblank = dcn_bw_no;
v->ta_pscalculation = dcn_bw_override;
v->allow_different_hratio_vratio = dcn_bw_yes;
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
/* skip all but first of split pipes */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
v->underscan_output[input_idx] = false; /* taken care of in recout already */
v->interlace_output[input_idx] = false;
v->htotal[input_idx] = pipe->stream->timing.h_total;
v->vtotal[input_idx] = pipe->stream->timing.v_total;
v->vactive[input_idx] = pipe->stream->timing.v_addressable +
pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom;
v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
- v->vactive[input_idx]
- pipe->stream->timing.v_front_porch;
v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0;
if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
v->pixel_clock[input_idx] *= 2;
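/* pix_clk_100hz is in units of 100 Hz, so dividing by 10000.0 yields MHz;
 * e.g. a 148,500,000 Hz pixel clock (1,485,000 in 100 Hz units) becomes
 * 148.5 MHz, and is doubled to 297.0 MHz when HW frame packing is used
 */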
if (!pipe->plane_state) {
v->dcc_enable[input_idx] = dcn_bw_yes;
v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s;
v->lb_bit_per_pixel[input_idx] = 30;
v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
/*
* for cases where we have no plane, validate up to a 1080p source
* size because here we are only interested in whether the output
* timing is supported. if we cannot support the native resolution
* of a high-res display, we still want to support lower resolutions
* upscaled to native
*/
if (v->viewport_width[input_idx] > 1920)
v->viewport_width[input_idx] = 1920;
if (v->viewport_height[input_idx] > 1080)
v->viewport_height[input_idx] = 1080;
v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx];
v->scaler_recout_height[input_idx] = v->viewport_height[input_idx];
v->override_hta_ps[input_idx] = 1;
v->override_vta_ps[input_idx] = 1;
v->override_hta_pschroma[input_idx] = 1;
v->override_vta_pschroma[input_idx] = 1;
v->source_scan[input_idx] = dcn_bw_hor;
} else {
v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height;
v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width;
v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width;
v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height;
if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) {
if (pipe->plane_state->rotation % 2 == 0) {
int viewport_end = pipe->plane_res.scl_data.viewport.width
+ pipe->plane_res.scl_data.viewport.x;
int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width
+ pipe->bottom_pipe->plane_res.scl_data.viewport.x;
if (viewport_end > viewport_b_end)
v->viewport_width[input_idx] = viewport_end
- pipe->bottom_pipe->plane_res.scl_data.viewport.x;
else
v->viewport_width[input_idx] = viewport_b_end
- pipe->plane_res.scl_data.viewport.x;
} else {
int viewport_end = pipe->plane_res.scl_data.viewport.height
+ pipe->plane_res.scl_data.viewport.y;
int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height
+ pipe->bottom_pipe->plane_res.scl_data.viewport.y;
if (viewport_end > viewport_b_end)
v->viewport_height[input_idx] = viewport_end
- pipe->bottom_pipe->plane_res.scl_data.viewport.y;
else
v->viewport_height[input_idx] = viewport_b_end
- pipe->plane_res.scl_data.viewport.y;
}
v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width
+ pipe->bottom_pipe->plane_res.scl_data.recout.width;
}
if (pipe->plane_state->rotation % 2 == 0) {
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
} else {
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
|| v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
}
if (dc->debug.optimized_watermark) {
/*
* this method requires us to always re-calculate watermarks when DCC
* changes between flips.
*/
v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
} else {
/*
* allow us to disable DCC on the fly without re-calculating WM
*
* the extra overhead for DCC is quite small: for 1080p, the WM without
* DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
*/
unsigned int bpe;
v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
}
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
pipe->plane_state->tiling_info.gfx9.swizzle);
v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps;
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
/*
* The spreadsheet doesn't handle taps_c == 1 properly; force chroma
* to always be scaled so that bandwidth validation passes.
*/
if (v->override_hta_pschroma[input_idx] == 1)
v->override_hta_pschroma[input_idx] = 2;
if (v->override_vta_pschroma[input_idx] == 1)
v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp;
v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
v->output[input_idx] = pipe->stream->signal ==
SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
if (v->output[input_idx] == dcn_bw_hdmi) {
switch (pipe->stream->timing.display_color_depth) {
case COLOR_DEPTH_101010:
v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc;
break;
case COLOR_DEPTH_121212:
v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc;
break;
case COLOR_DEPTH_161616:
v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc;
break;
default:
break;
}
}
input_idx++;
}
v->number_of_active_planes = input_idx;
scaler_settings_calculation(v);
hack_bounding_box(v, &dc->debug, context);
mode_support_and_system_configuration(v);
/* Unhack dppclk: don't bother trying to pipe split if we cannot maintain dpm0 */
if (v->voltage_level != 0
&& context->stream_count == 1
&& dc->debug.force_single_disp_pipe_split) {
v->max_dppclk[0] = v->max_dppclk_vmin0p65;
mode_support_and_system_configuration(v);
}
if (v->voltage_level == 0 &&
(dc->debug.sr_exit_time_dpm0_ns
|| dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
v->sr_enter_plus_exit_time =
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
display_pipe_configuration(v);
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_scan[k] == dcn_bw_hor)
v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
else
v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->byte_per_pixel_dety[k] = 8.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
v->byte_per_pixel_dety[k] = 4.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->byte_per_pixel_dety[k] = 2.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->byte_per_pixel_dety[k] = 1.0;
v->byte_per_pixel_detc[k] = 2.0;
} else {
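/* remaining case (10-bit YUV420): samples are packed three per 32-bit word,
 * i.e. 4/3 bytes per luma sample and 8/3 bytes per CbCr pair, mirroring the
 * 1.0/2.0 values used for the 8-bit case above
 */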
v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
}
}
v->total_data_read_bandwidth = 0.0;
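/* per-plane read bandwidth in bytes/us (== MB/s):
 *   swath_width * dpp_per_plane * ceil(bytes_per_pixel) / line_time * v_ratio,
 * where line_time = htotal / pixel_clock (us); the chroma term uses half the
 * swath width plus an extra /2 for the 4:2:0 vertical subsampling
 */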
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *
dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *
dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
v->total_data_read_bandwidth = v->total_data_read_bandwidth +
v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
}
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8)
bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8;
else
bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9;
if (bw_consumed < v->fabric_and_dram_bandwidth)
if (dc->debug.voltage_align_fclk)
bw_consumed = v->fabric_and_dram_bandwidth;
display_pipe_configuration(v);
/*calc_wm_sets_and_perf_params(context, v);*/
/* Only 1 set is used by dcn since no noticeable
* performance improvement was measured and due to hw bug DEGVIDCN10-254
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
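/* the watermark values computed above are in microseconds; multiply by
 * 1000 to store them in the nanosecond-based register-facing fields
 */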
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65)
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
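/* bw_consumed is in GB/s; multiplying by 1,000,000 gives kB/s, and dividing
 * by the bytes moved per fclk cycle (ddr4_dram_factor_single_Channel *
 * number_of_channels) yields fclk in kHz. The vmin0p65 bandwidth was derived
 * with a fixed factor of 32 (see dcn_bw_update_from_pplib_fclks), so it is
 * converted back with /32 here
 */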
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz /
v->dispclk_dppclk_ratio;
context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
BW_VAL_TRACE_END_WATERMARKS();
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/* skip inactive pipe */
if (!pipe->stream)
continue;
/* skip all but first of split pipes */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
vesa_sync_start = pipe->stream->timing.v_addressable +
pipe->stream->timing.v_border_bottom +
pipe->stream->timing.v_front_porch;
asic_blank_end = (pipe->stream->timing.v_total -
vesa_sync_start -
pipe->stream->timing.v_border_top)
* (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
asic_blank_start = asic_blank_end +
(pipe->stream->timing.v_border_top +
pipe->stream->timing.v_addressable +
pipe->stream->timing.v_border_bottom)
* (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
pipe->pipe_dlg_param.vblank_start = asic_blank_start;
pipe->pipe_dlg_param.vblank_end = asic_blank_end;
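/* note: the (INTERLACE ? 1 : 0) factor above leaves asic_blank_start/end,
 * and therefore vblank_start/end, at 0 for progressive timings; only
 * interlaced timings receive the computed blank positions here
 */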
if (pipe->plane_state) {
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
pipe->plane_state->update_flags.bits.full_update = 1;
if (v->dpp_per_plane[input_idx] == 2 ||
((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
(pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe */
hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start;
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
hsplit_pipe = resource_find_free_secondary_pipe_legacy(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe);
}
dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx);
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* merge previously split pipe */
pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
if (hsplit_pipe->bottom_pipe)
hsplit_pipe->bottom_pipe->top_pipe = pipe;
hsplit_pipe->plane_state = NULL;
hsplit_pipe->stream = NULL;
hsplit_pipe->top_pipe = NULL;
hsplit_pipe->bottom_pipe = NULL;
/* Clear plane_res and stream_res */
memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
resource_build_scaling_params(pipe);
}
/* for now important to do this after pipe split for building e2e params */
dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx);
}
input_idx++;
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
} else if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
}
if (v->voltage_level == 0) {
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*
* BW limit is set to prevent display from impacting other system functions
*/
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
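/* total_data_read_bandwidth is in MB/s, so /1000.0 compares GB/s against the
 * limit; percent_disp_bw_limit is presumably a fraction of the vmax0p9
 * fabric+DRAM bandwidth (also in GB/s)
 */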
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->config.is_vmin_only_asic))
return true;
else
return false;
}
static unsigned int dcn_find_normalized_clock_vdd_Level(
const struct dc *dc,
enum dm_pp_clock_type clocks_type,
int clocks_in_khz)
{
int vdd_level = dcn_bw_v_min0p65;
if (clocks_in_khz == 0)/* TODO: some clocks are not yet part of these considerations */
return vdd_level;
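/* each clock type below maps the requested frequency to the lowest voltage
 * level whose bounding-box maximum can carry it; e.g. a dispclk above the
 * vnom0p8 limit but within the vmax0p9 limit resolves to dcn_bw_v_max0p9,
 * and anything beyond vmax0p9 flags dcn_bw_v_max0p91 and breaks to the
 * debugger
 */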
switch (clocks_type) {
case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
vdd_level = dcn_bw_v_max0p91;
BREAK_TO_DEBUGGER();
} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
vdd_level = dcn_bw_v_max0p9;
} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
vdd_level = dcn_bw_v_nom0p8;
} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
vdd_level = dcn_bw_v_mid0p72;
} else
vdd_level = dcn_bw_v_min0p65;
break;
case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
vdd_level = dcn_bw_v_max0p91;
BREAK_TO_DEBUGGER();
} else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
vdd_level = dcn_bw_v_max0p9;
} else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
vdd_level = dcn_bw_v_nom0p8;
} else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
vdd_level = dcn_bw_v_mid0p72;
} else
vdd_level = dcn_bw_v_min0p65;
break;
case DM_PP_CLOCK_TYPE_DPPCLK:
if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
vdd_level = dcn_bw_v_max0p91;
BREAK_TO_DEBUGGER();
} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
vdd_level = dcn_bw_v_max0p9;
} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
vdd_level = dcn_bw_v_nom0p8;
} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
vdd_level = dcn_bw_v_mid0p72;
} else
vdd_level = dcn_bw_v_min0p65;
break;
case DM_PP_CLOCK_TYPE_MEMORY_CLK:
{
unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
vdd_level = dcn_bw_v_max0p91;
BREAK_TO_DEBUGGER();
} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
vdd_level = dcn_bw_v_max0p9;
} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
vdd_level = dcn_bw_v_nom0p8;
} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
vdd_level = dcn_bw_v_mid0p72;
} else
vdd_level = dcn_bw_v_min0p65;
}
break;
case DM_PP_CLOCK_TYPE_DCFCLK:
if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
vdd_level = dcn_bw_v_max0p91;
BREAK_TO_DEBUGGER();
} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
vdd_level = dcn_bw_v_max0p9;
} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
vdd_level = dcn_bw_v_nom0p8;
} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
vdd_level = dcn_bw_v_mid0p72;
} else
vdd_level = dcn_bw_v_min0p65;
break;
default:
break;
}
return vdd_level;
}
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks)
{
unsigned vdd_level, vdd_level_temp;
unsigned dcf_clk;
/*find a common supported voltage level*/
vdd_level = dcn_find_normalized_clock_vdd_Level(
dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
/* find the dcfclk corresponding to that level */
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
if (vdd_level == dcn_bw_v_max0p91) {
BREAK_TO_DEBUGGER();
dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
} else if (vdd_level == dcn_bw_v_max0p9)
dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
else if (vdd_level == dcn_bw_v_nom0p8)
dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
else if (vdd_level == dcn_bw_v_mid0p72)
dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
else
dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
return dcf_clk;
}
void dcn_bw_update_from_pplib_fclks(
struct dc *dc,
struct dm_pp_clock_levels_with_voltage *fclks)
{
unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
ASSERT(fclks->num_levels);
vmin0p65_idx = 0;
vmid0p72_idx = fclks->num_levels -
(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
vmax0p9_idx = fclks->num_levels - 1;
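/* e.g. with four reported fclk levels the indices resolve to 0/1/2/3;
 * with three levels vmid falls back to index 0, and with two levels both
 * vmid and vnom fall back to index 0
 */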
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
dc->dcn_soc->number_of_channels *
(fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
* ddr4_dram_factor_single_Channel / 1000.0;
dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
dc->dcn_soc->number_of_channels *
(fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
* ddr4_dram_factor_single_Channel / 1000.0;
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
dc->dcn_soc->number_of_channels *
(fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
* ddr4_dram_factor_single_Channel / 1000.0;
}
void dcn_bw_update_from_pplib_dcfclks(
struct dc *dc,
struct dm_pp_clock_levels_with_voltage *dcfclks)
{
if (dcfclks->num_levels >= 3) {
dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
}
}
void dcn_get_soc_clks(
struct dc *dc,
int *min_fclk_khz,
int *min_dcfclk_khz,
int *socclk_khz)
{
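/* the vmin0p65 bandwidth was computed as 32 bytes/cycle * fclk in
 * dcn_bw_update_from_pplib_fclks, so dividing by 32 recovers the minimum
 * fclk in kHz
 */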
*min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
*min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
*socclk_khz = dc->dcn_soc->socclk * 1000;
}
void dcn_bw_notify_pplib_of_wm_ranges(
struct dc *dc,
int min_fclk_khz,
int min_dcfclk_khz,
int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
pp = &dc->res_pool->pp_smu->rv_funcs;
if (!pp || !pp->set_wm_ranges)
return;
/* Now notify PPLib/SMU which watermark sets they should select
* depending on the DPM state they are in, and update the BW MGR GFX engine
* and memory clock member variables used for the watermark calculations
* of each watermark set. Only one watermark set is used for dcn1 due to hw bug DEGVIDCN10-254.
*/
/* SOCCLK does not affect anything but writeback for DCN, so for now we don't
* care what the value is, hence min to overdrive level
*/
ranges.num_reader_wm_sets = WM_SET_COUNT;
ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
}
ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
ranges.reader_wm_sets[1].wm_inst = WM_B;
ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
ranges.reader_wm_sets[2].wm_inst = WM_C;
ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
ranges.reader_wm_sets[3].wm_inst = WM_D;
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
pp->set_wm_ranges(&pp->pp_smu, &ranges);
}
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
"write_back_latency: %f ns\n"
"percent_of_ideal_drambw_received_after_urg_latency: %f %%\n"
"max_request_size: %d bytes\n"
"dcfclkv_max0p9: %f kHz\n"
"dcfclkv_nom0p8: %f kHz\n"
"dcfclkv_mid0p72: %f kHz\n"
"dcfclkv_min0p65: %f kHz\n"
"max_dispclk_vmax0p9: %f kHz\n"
"max_dispclk_vnom0p8: %f kHz\n"
"max_dispclk_vmid0p72: %f kHz\n"
"max_dispclk_vmin0p65: %f kHz\n"
"max_dppclk_vmax0p9: %f kHz\n"
"max_dppclk_vnom0p8: %f kHz\n"
"max_dppclk_vmid0p72: %f kHz\n"
"max_dppclk_vmin0p65: %f kHz\n"
"socclk: %f kHz\n"
"fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n"
"fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n"
"fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n"
"fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n"
"phyclkv_max0p9: %f kHz\n"
"phyclkv_nom0p8: %f kHz\n"
"phyclkv_mid0p72: %f kHz\n"
"phyclkv_min0p65: %f kHz\n"
"downspreading: %f %%\n"
"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
"urgent_out_of_order_return_per_channel: %d Bytes\n"
"number_of_channels: %d\n"
"vmm_page_size: %d Bytes\n"
"dram_clock_change_latency: %f ns\n"
"return_bus_width: %d Bytes\n",
dc->dcn_soc->sr_exit_time * 1000,
dc->dcn_soc->sr_enter_plus_exit_time * 1000,
dc->dcn_soc->urgent_latency * 1000,
dc->dcn_soc->write_back_latency * 1000,
dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency,
dc->dcn_soc->max_request_size,
dc->dcn_soc->dcfclkv_max0p9 * 1000,
dc->dcn_soc->dcfclkv_nom0p8 * 1000,
dc->dcn_soc->dcfclkv_mid0p72 * 1000,
dc->dcn_soc->dcfclkv_min0p65 * 1000,
dc->dcn_soc->max_dispclk_vmax0p9 * 1000,
dc->dcn_soc->max_dispclk_vnom0p8 * 1000,
dc->dcn_soc->max_dispclk_vmid0p72 * 1000,
dc->dcn_soc->max_dispclk_vmin0p65 * 1000,
dc->dcn_soc->max_dppclk_vmax0p9 * 1000,
dc->dcn_soc->max_dppclk_vnom0p8 * 1000,
dc->dcn_soc->max_dppclk_vmid0p72 * 1000,
dc->dcn_soc->max_dppclk_vmin0p65 * 1000,
dc->dcn_soc->socclk * 1000,
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000,
dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000,
dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000,
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000,
dc->dcn_soc->phyclkv_max0p9 * 1000,
dc->dcn_soc->phyclkv_nom0p8 * 1000,
dc->dcn_soc->phyclkv_mid0p72 * 1000,
dc->dcn_soc->phyclkv_min0p65 * 1000,
dc->dcn_soc->downspreading * 100,
dc->dcn_soc->round_trip_ping_latency_cycles,
dc->dcn_soc->urgent_out_of_order_return_per_channel,
dc->dcn_soc->number_of_channels,
dc->dcn_soc->vmm_page_size,
dc->dcn_soc->dram_clock_change_latency * 1000,
dc->dcn_soc->return_bus_width);
DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n"
"det_buffer_size_in_kbyte: %f\n"
"dpp_output_buffer_pixels: %f\n"
"opp_output_buffer_lines: %f\n"
"pixel_chunk_size_in_kbyte: %f\n"
"pte_enable: %d\n"
"pte_chunk_size: %d kbytes\n"
"meta_chunk_size: %d kbytes\n"
"writeback_chunk_size: %d kbytes\n"
"odm_capability: %d\n"
"dsc_capability: %d\n"
"line_buffer_size: %d bits\n"
"max_line_buffer_lines: %d\n"
"is_line_buffer_bpp_fixed: %d\n"
"line_buffer_fixed_bpp: %d\n"
"writeback_luma_buffer_size: %d kbytes\n"
"writeback_chroma_buffer_size: %d kbytes\n"
"max_num_dpp: %d\n"
"max_num_writeback: %d\n"
"max_dchub_topscl_throughput: %d pixels/dppclk\n"
"max_pscl_tolb_throughput: %d pixels/dppclk\n"
"max_lb_tovscl_throughput: %d pixels/dppclk\n"
"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
"max_hscl_ratio: %f\n"
"max_vscl_ratio: %f\n"
"max_hscl_taps: %d\n"
"max_vscl_taps: %d\n"
"pte_buffer_size_in_requests: %d\n"
"dispclk_ramping_margin: %f %%\n"
"under_scan_factor: %f %%\n"
"max_inter_dcn_tile_repeaters: %d\n"
"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
"dcfclk_cstate_latency: %d\n",
dc->dcn_ip->rob_buffer_size_in_kbyte,
dc->dcn_ip->det_buffer_size_in_kbyte,
dc->dcn_ip->dpp_output_buffer_pixels,
dc->dcn_ip->opp_output_buffer_lines,
dc->dcn_ip->pixel_chunk_size_in_kbyte,
dc->dcn_ip->pte_enable,
dc->dcn_ip->pte_chunk_size,
dc->dcn_ip->meta_chunk_size,
dc->dcn_ip->writeback_chunk_size,
dc->dcn_ip->odm_capability,
dc->dcn_ip->dsc_capability,
dc->dcn_ip->line_buffer_size,
dc->dcn_ip->max_line_buffer_lines,
dc->dcn_ip->is_line_buffer_bpp_fixed,
dc->dcn_ip->line_buffer_fixed_bpp,
dc->dcn_ip->writeback_luma_buffer_size,
dc->dcn_ip->writeback_chroma_buffer_size,
dc->dcn_ip->max_num_dpp,
dc->dcn_ip->max_num_writeback,
dc->dcn_ip->max_dchub_topscl_throughput,
dc->dcn_ip->max_pscl_tolb_throughput,
dc->dcn_ip->max_lb_tovscl_throughput,
dc->dcn_ip->max_vscl_tohscl_throughput,
dc->dcn_ip->max_hscl_ratio,
dc->dcn_ip->max_vscl_ratio,
dc->dcn_ip->max_hscl_taps,
dc->dcn_ip->max_vscl_taps,
dc->dcn_ip->pte_buffer_size_in_requests,
dc->dcn_ip->dispclk_ramping_margin,
dc->dcn_ip->under_scan_factor * 100,
dc->dcn_ip->max_inter_dcn_tile_repeaters,
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
dc->dcn_ip->dcfclk_cstate_latency);
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency;
dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency;
dc->dml.soc.ideal_dram_bw_after_urgent_percent =
dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size;
dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading;
dc->dml.soc.round_trip_ping_latency_dcfclk_cycles =
dc->dcn_soc->round_trip_ping_latency_cycles;
dc->dml.soc.urgent_out_of_order_return_per_channel_bytes =
dc->dcn_soc->urgent_out_of_order_return_per_channel;
dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels;
dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size;
dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency;
dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width;
dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte;
dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte;
dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte;
dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes;
dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size;
dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size;
dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size;
dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size;
dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes;
dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp;
dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size;
dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size;
dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp;
dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback;
dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput;
dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput;
dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput;
dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput;
dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps;
dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps;
/*pte_buffer_size_in_requests missing in dml*/
dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin;
dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor;
dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes;
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c |
/*
* Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
#define TO_DCN301_RES_POOL(pool)\
container_of(pool, struct dcn301_resource_pool, base)
/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */
struct _vcs_dpi_ip_params_st dcn3_01_ip = {
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 2,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 3,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 32,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 48, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 656640,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 4,
.max_num_dpp = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 0,
.odm_combine_4to1_supported = true,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
.clock_limits = {
{
.state = 0,
.dram_speed_mts = 2400.0,
.fabricclk_mhz = 600,
.socclk_mhz = 278.0,
.dcfclk_mhz = 400.0,
.dscclk_mhz = 206.0,
.dppclk_mhz = 1015.0,
.dispclk_mhz = 1015.0,
.phyclk_mhz = 600.0,
},
{
.state = 1,
.dram_speed_mts = 2400.0,
.fabricclk_mhz = 688,
.socclk_mhz = 278.0,
.dcfclk_mhz = 400.0,
.dscclk_mhz = 206.0,
.dppclk_mhz = 1015.0,
.dispclk_mhz = 1015.0,
.phyclk_mhz = 600.0,
},
{
.state = 2,
.dram_speed_mts = 4267.0,
.fabricclk_mhz = 1067,
.socclk_mhz = 278.0,
.dcfclk_mhz = 608.0,
.dscclk_mhz = 296.0,
.dppclk_mhz = 1015.0,
.dispclk_mhz = 1015.0,
.phyclk_mhz = 810.0,
},
{
.state = 3,
.dram_speed_mts = 4267.0,
.fabricclk_mhz = 1067,
.socclk_mhz = 715.0,
.dcfclk_mhz = 676.0,
.dscclk_mhz = 338.0,
.dppclk_mhz = 1015.0,
.dispclk_mhz = 1015.0,
.phyclk_mhz = 810.0,
},
{
.state = 4,
.dram_speed_mts = 4267.0,
.fabricclk_mhz = 1067,
.socclk_mhz = 953.0,
.dcfclk_mhz = 810.0,
.dscclk_mhz = 338.0,
.dppclk_mhz = 1015.0,
.dispclk_mhz = 1015.0,
.phyclk_mhz = 810.0,
},
},
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 60.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 191,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 4,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3550,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.num_states = 5,
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 6.09,
.sr_enter_plus_exit_time_us = 7.14,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
}
};
struct wm_table lpddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 13.5,
.sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 13.5,
.sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 13.5,
.sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 13.5,
.sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
}
};
static void calculate_wm_set_for_vlevel(int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int j;
dc_assert_fp_enabled();
memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits));
/* Default clock levels are used for diags, which may lead to overclocking. */
dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards to find the highest default state whose dcfclk does not exceed this entry's dcfclk */
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
s[i].state = i;
s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
s[i].phyclk_d18_mhz =
dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_01_soc.num_states = clk_table->num_entries;
/* duplicate last level */
s[dcn3_01_soc.num_states] =
dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
}
memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits));
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if ((int)(dcn3_01_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_01_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0;
}
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
void dcn301_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
dc_assert_fp_enabled();
ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
}
void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
{
dc_assert_fp_enabled();
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel_req)
{
int i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
ASSERT(bw_params);
dc_assert_fp_enabled();
vlevel_max = bw_params->clk_table.num_entries - 1;
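/* each watermark set is evaluated at a different voltage level: D at the
 * highest level (or level 0 for retraining-type entries), C at no lower
 * than level 2, B at no lower than level 1, and A at the requested level
 */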
/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = min(max(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = min(max(vlevel_req, 1), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = min(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_idx++;
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_vba_util_32.h"
#include "../dml_inline_defs.h"
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line,
// must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, L,
Delay, pixels;
if (pixelFormat == dm_420)
pixelsPerClock = 2;
else if (pixelFormat == dm_n422)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / BPP / pixelsPerClock);
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//simple 422 (S422) mode has an additional cycle of delay
if (pixelFormat == dm_420 || pixelFormat == dm_444 || pixelFormat == dm_n422)
s = 0;
else
s = 1;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
p = 3 * wx - w;
l0 = ix / w;
a = ix + p * l0;
ax = (a + 2) / 3 + D + 6 + 1;
L = (ax + wx - 1) / wx;
if ((ix % w) == 0 && p != 0)
lstall = 1;
else
lstall = 0;
Delay = L * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: bpc: %d\n", __func__, bpc);
dml_print("DML::%s: BPP: %f\n", __func__, BPP);
dml_print("DML::%s: sliceWidth: %d\n", __func__, sliceWidth);
dml_print("DML::%s: numSlices: %d\n", __func__, numSlices);
dml_print("DML::%s: pixelFormat: %d\n", __func__, pixelFormat);
dml_print("DML::%s: Output: %d\n", __func__, Output);
dml_print("DML::%s: pixels: %d\n", __func__, pixels);
#endif
return pixels;
}
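/*
 * Illustrative worked example (hypothetical inputs): bpc = 8, BPP = 12,
 * sliceWidth = 960, numSlices = 4, pixelFormat = dm_444.
 * pixelsPerClock = 1, initalXmitDelay = round(8192/2/12) = 341, D = 81,
 * w = 960, s = 0, ix = 386, wx = 320, p = 0, l0 = 0, a = 386,
 * ax = 388/3 + 81 + 6 + 1 = 217, L = 1, lstall = 0,
 * Delay = 1*320*3 + 217 + 0 + 0 + 22 = 1199 cycles,
 * pixels = 1199 * 3 * 1 = 3597.
 */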
unsigned int dml32_dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422 || (pixelFormat != dm_444)) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
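/*
 * Illustrative tally, derived directly from the constants above: the dm_420
 * path sums to 48 cycles, the non-444 (s422/n422) path to 49 cycles and the
 * 444 path to 30 cycles.
 */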
bool IsVertical(enum dm_rotation_angle Scan)
{
bool is_vert = false;
if (Scan == dm_rotation_90 || Scan == dm_rotation_90m || Scan == dm_rotation_270 || Scan == dm_rotation_270m)
is_vert = true;
else
is_vert = false;
return is_vert;
}
void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double HRatio,
double HRatioChroma,
double VRatio,
double VRatioChroma,
double MaxDCHUBToPSCLThroughput,
double MaxPSCLToLBThroughput,
double PixelClock,
enum source_format_class SourcePixelFormat,
unsigned int HTaps,
unsigned int HTapsChroma,
unsigned int VTaps,
unsigned int VTapsChroma,
/* output */
double *PSCL_THROUGHPUT,
double *PSCL_THROUGHPUT_CHROMA,
double *DPPCLKUsingSingleDPP)
{
double DPPCLKUsingSingleDPPLuma;
double DPPCLKUsingSingleDPPChroma;
if (HRatio > 1) {
*PSCL_THROUGHPUT = dml_min(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput * HRatio /
dml_ceil((double) HTaps / 6.0, 1.0));
} else {
*PSCL_THROUGHPUT = dml_min(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
}
DPPCLKUsingSingleDPPLuma = PixelClock * dml_max3(VTaps / 6 * dml_min(1, HRatio), HRatio * VRatio /
*PSCL_THROUGHPUT, 1);
if ((HTaps > 6 || VTaps > 6) && DPPCLKUsingSingleDPPLuma < 2 * PixelClock)
DPPCLKUsingSingleDPPLuma = 2 * PixelClock;
if ((SourcePixelFormat != dm_420_8 && SourcePixelFormat != dm_420_10 && SourcePixelFormat != dm_420_12 &&
SourcePixelFormat != dm_rgbe_alpha)) {
*PSCL_THROUGHPUT_CHROMA = 0;
*DPPCLKUsingSingleDPP = DPPCLKUsingSingleDPPLuma;
} else {
if (HRatioChroma > 1) {
*PSCL_THROUGHPUT_CHROMA = dml_min(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput *
HRatioChroma / dml_ceil((double) HTapsChroma / 6.0, 1.0));
} else {
*PSCL_THROUGHPUT_CHROMA = dml_min(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
}
DPPCLKUsingSingleDPPChroma = PixelClock * dml_max3(VTapsChroma / 6 * dml_min(1, HRatioChroma),
HRatioChroma * VRatioChroma / *PSCL_THROUGHPUT_CHROMA, 1);
if ((HTapsChroma > 6 || VTapsChroma > 6) && DPPCLKUsingSingleDPPChroma < 2 * PixelClock)
DPPCLKUsingSingleDPPChroma = 2 * PixelClock;
*DPPCLKUsingSingleDPP = dml_max(DPPCLKUsingSingleDPPLuma, DPPCLKUsingSingleDPPChroma);
}
}
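/*
 * Illustrative worked example (hypothetical inputs): HRatio = VRatio = 2.0,
 * HTaps = VTaps = 8, MaxDCHUBToPSCLThroughput = 4, MaxPSCLToLBThroughput = 2,
 * PixelClock = 600 MHz, no chroma plane.
 * PSCL_THROUGHPUT = min(4, 2 * 2.0 / ceil(8/6.0)) = 2 pixels per clock.
 * DPPCLKUsingSingleDPPLuma = 600 * max3(8/6 * 1, 2*2/2, 1) = 600 * 2 = 1200 MHz
 * (note VTaps / 6 is integer division, so 8/6 contributes 1), and the >6-tap
 * bump to 2*PixelClock changes nothing since 1200 is already 2 * 600.
 */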
void dml32_CalculateBytePerPixelAndBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
/* Output */
unsigned int *BytePerPixelY,
unsigned int *BytePerPixelC,
double *BytePerPixelDETY,
double *BytePerPixelDETC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC,
unsigned int *MacroTileHeightY,
unsigned int *MacroTileHeightC,
unsigned int *MacroTileWidthY,
unsigned int *MacroTileWidthC)
{
if (SourcePixelFormat == dm_444_64) {
*BytePerPixelDETY = 8;
*BytePerPixelDETC = 0;
*BytePerPixelY = 8;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 0;
*BytePerPixelY = 1;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 1;
*BytePerPixelY = 4;
*BytePerPixelC = 1;
} else if (SourcePixelFormat == dm_420_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 2;
*BytePerPixelY = 1;
*BytePerPixelC = 2;
} else if (SourcePixelFormat == dm_420_12) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 4;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
*BytePerPixelDETY = 4.0 / 3;
*BytePerPixelDETC = 8.0 / 3;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SourcePixelFormat = %d\n", __func__, SourcePixelFormat);
dml_print("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
dml_print("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, *BytePerPixelY);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, *BytePerPixelC);
#endif
if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
|| SourcePixelFormat == dm_444_16
|| SourcePixelFormat == dm_444_8
|| SourcePixelFormat == dm_mono_16
|| SourcePixelFormat == dm_mono_8
|| SourcePixelFormat == dm_rgbe)) {
if (SurfaceTiling == dm_sw_linear)
*BlockHeight256BytesY = 1;
else if (SourcePixelFormat == dm_444_64)
*BlockHeight256BytesY = 4;
else if (SourcePixelFormat == dm_444_8)
*BlockHeight256BytesY = 16;
else
*BlockHeight256BytesY = 8;
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockHeight256BytesC = 0;
*BlockWidth256BytesC = 0;
} else {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
*BlockHeight256BytesC = 1;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 16;
} else if (SourcePixelFormat == dm_420_8) {
*BlockHeight256BytesY = 16;
*BlockHeight256BytesC = 8;
} else {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BlockWidth256BytesY = %d\n", __func__, *BlockWidth256BytesY);
dml_print("DML::%s: BlockHeight256BytesY = %d\n", __func__, *BlockHeight256BytesY);
dml_print("DML::%s: BlockWidth256BytesC = %d\n", __func__, *BlockWidth256BytesC);
dml_print("DML::%s: BlockHeight256BytesC = %d\n", __func__, *BlockHeight256BytesC);
#endif
if (SurfaceTiling == dm_sw_linear) {
*MacroTileHeightY = *BlockHeight256BytesY;
*MacroTileWidthY = 256 / *BytePerPixelY / *MacroTileHeightY;
*MacroTileHeightC = *BlockHeight256BytesC;
if (*MacroTileHeightC == 0)
*MacroTileWidthC = 0;
else
*MacroTileWidthC = 256 / *BytePerPixelC / *MacroTileHeightC;
} else if (SurfaceTiling == dm_sw_64kb_d || SurfaceTiling == dm_sw_64kb_d_t ||
SurfaceTiling == dm_sw_64kb_d_x || SurfaceTiling == dm_sw_64kb_r_x) {
*MacroTileHeightY = 16 * *BlockHeight256BytesY;
*MacroTileWidthY = 65536 / *BytePerPixelY / *MacroTileHeightY;
*MacroTileHeightC = 16 * *BlockHeight256BytesC;
if (*MacroTileHeightC == 0)
*MacroTileWidthC = 0;
else
*MacroTileWidthC = 65536 / *BytePerPixelC / *MacroTileHeightC;
} else {
*MacroTileHeightY = 32 * *BlockHeight256BytesY;
*MacroTileWidthY = 65536 * 4 / *BytePerPixelY / *MacroTileHeightY;
*MacroTileHeightC = 32 * *BlockHeight256BytesC;
if (*MacroTileHeightC == 0)
*MacroTileWidthC = 0;
else
*MacroTileWidthC = 65536 * 4 / *BytePerPixelC / *MacroTileHeightC;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MacroTileWidthY = %d\n", __func__, *MacroTileWidthY);
dml_print("DML::%s: MacroTileHeightY = %d\n", __func__, *MacroTileHeightY);
dml_print("DML::%s: MacroTileWidthC = %d\n", __func__, *MacroTileWidthC);
dml_print("DML::%s: MacroTileHeightC = %d\n", __func__, *MacroTileHeightC);
#endif
} // CalculateBytePerPixelAndBlockSizes
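/*
 * Illustrative example (hypothetical surface): dm_420_8 with dm_sw_64kb_r_x
 * tiling gives BytePerPixelY = 1, BytePerPixelC = 2, a 16x16 pixel 256-byte
 * luma block and a 16x8 pixel 256-byte chroma block, and 64 KB macro tiles of
 * 256x256 luma pixels and 256x128 chroma pixels
 * (MacroTileWidthY = 65536/1/256 = 256, MacroTileWidthC = 65536/2/128 = 256).
 */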
void dml32_CalculateSwathAndDETConfiguration(
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
unsigned int MaxTotalDETInKByte,
unsigned int MinCompressedBufferSizeInKByte,
double ForceSingleDPP,
unsigned int NumberOfActiveSurfaces,
unsigned int nomDETInKByte,
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
bool DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment,
unsigned int PixelChunkSizeKBytes,
unsigned int ROBSizeKBytes,
unsigned int CompressedBufferSegmentSizeInkByteFinal,
enum output_encoder_class Output[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum dm_rotation_angle SourceRotation[],
bool ViewportStationary[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
unsigned int ViewportWidth[],
unsigned int ViewportHeight[],
unsigned int ViewportXStart[],
unsigned int ViewportYStart[],
unsigned int ViewportXStartC[],
unsigned int ViewportYStartC[],
unsigned int SurfaceWidthY[],
unsigned int SurfaceWidthC[],
unsigned int SurfaceHeightY[],
unsigned int SurfaceHeightC[],
unsigned int Read256BytesBlockHeightY[],
unsigned int Read256BytesBlockHeightC[],
unsigned int Read256BytesBlockWidthY[],
unsigned int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMMode[],
unsigned int BlendingAndTiming[],
unsigned int BytePerPixY[],
unsigned int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
unsigned int HActive[],
double HRatio[],
double HRatioChroma[],
unsigned int DPPPerSurface[],
/* Output */
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int DETBufferSizeInKByte[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool *UnboundedRequestEnabled,
unsigned int *CompressedBufferSizeInkByte,
unsigned int *CompBufReservedSpaceKBytes,
bool *CompBufReservedSpaceNeedAdjustment,
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
unsigned int RoundedUpSwathSizeBytesY;
unsigned int RoundedUpSwathSizeBytesC;
double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
unsigned int TotalActiveDPP = 0;
bool NoChromaSurfaces = true;
unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
dml_print("DML::%s: ROBSizeKBytes = %d\n", __func__, ROBSizeKBytes);
dml_print("DML::%s: PixelChunkSizeKBytes = %d\n", __func__, PixelChunkSizeKBytes);
#endif
dml32_CalculateSwathWidth(ForceSingleDPP,
NumberOfActiveSurfaces,
SourcePixelFormat,
SourceRotation,
ViewportStationary,
ViewportWidth,
ViewportHeight,
ViewportXStart,
ViewportYStart,
ViewportXStartC,
ViewportYStartC,
SurfaceWidthY,
SurfaceWidthC,
SurfaceHeightY,
SurfaceHeightC,
ODMMode,
BytePerPixY,
BytePerPixC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
BlendingAndTiming,
HActive,
HRatio,
DPPPerSurface,
/* Output */
SwathWidthdoubleDPP,
SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
MaximumSwathHeightY,
MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
NoChromaSurfaces = false;
}
}
// By default, just set the reserved space to 2 pixel chunks size
*CompBufReservedSpaceKBytes = PixelChunkSizeKBytes * 2;
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
*CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
*CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: CompBufReservedSpaceKBytes = %d\n", __func__, *CompBufReservedSpaceKBytes);
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
*UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
ForceSingleDPP,
NumberOfActiveSurfaces,
*UnboundedRequestEnabled,
nomDETInKByte,
MaxTotalDETInKByte,
ConfigReturnBufferSizeInKByte,
MinCompressedBufferSizeInKByte,
CompressedBufferSegmentSizeInkByteFinal,
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
RoundedUpMaxSwathSizeBytesY,
RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
DETBufferSizeInKByte, // per hubp pipe
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
dml_print("DML::%s: UnboundedRequestEnabled = %d\n", __func__, *UnboundedRequestEnabled);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, *CompressedBufferSizeInkByte);
#endif
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
DETBufferSizeInKByteForSwathCalculation);
#endif
if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
} else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
} else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
ViewportSizeSupportPerSurface[k] = false;
} else {
ViewportSizeSupportPerSurface[k] = true;
}
if (SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d All DET for plane0\n", __func__, k);
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024 / 2;
DETBufferSizeC[k] = DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d 2/3 DET for plane0, 1/3 for plane1\n", __func__, k);
#endif
DETBufferSizeY[k] = dml_floor(DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024);
DETBufferSizeC[k] = DETBufferSizeInKByte[k] * 1024 - DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
k, RoundedUpMaxSwathSizeBytesC[k]);
dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
dml_print("DML::%s: k=%0d ViewportSizeSupportPerSurface = %d\n", __func__, k,
ViewportSizeSupportPerSurface[k]);
#endif
}
} // CalculateSwathAndDETConfiguration
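/*
 * Illustrative example of the swath-height decision above (hypothetical
 * numbers): with RoundedUpMaxSwathSizeBytesY = 40960, ...C = 20480 and a
 * 64 KB DET (half-DET budget 32768 bytes), neither the full-height nor the
 * single-plane-halved options fit, so both planes fall back to half swath
 * height (20480 + 10240 = 30720 <= 32768). Since the halved luma swath is
 * still more than 1.5x the chroma swath, the DET is then split 2/3 : 1/3,
 * i.e. DETBufferSizeY = 43008 and DETBufferSizeC = 22528 bytes.
 */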
void dml32_CalculateSwathWidth(
bool ForceSingleDPP,
unsigned int NumberOfActiveSurfaces,
enum source_format_class SourcePixelFormat[],
enum dm_rotation_angle SourceRotation[],
bool ViewportStationary[],
unsigned int ViewportWidth[],
unsigned int ViewportHeight[],
unsigned int ViewportXStart[],
unsigned int ViewportYStart[],
unsigned int ViewportXStartC[],
unsigned int ViewportYStartC[],
unsigned int SurfaceWidthY[],
unsigned int SurfaceWidthC[],
unsigned int SurfaceHeightY[],
unsigned int SurfaceHeightC[],
enum odm_combine_mode ODMMode[],
unsigned int BytePerPixY[],
unsigned int BytePerPixC[],
unsigned int Read256BytesBlockHeightY[],
unsigned int Read256BytesBlockHeightC[],
unsigned int Read256BytesBlockWidthY[],
unsigned int Read256BytesBlockWidthC[],
unsigned int BlendingAndTiming[],
unsigned int HActive[],
double HRatio[],
unsigned int DPPPerSurface[],
/* Output */
double SwathWidthdoubleDPPY[],
double SwathWidthdoubleDPPC[],
double SwathWidthY[], // per-pipe
double SwathWidthC[], // per-pipe
unsigned int MaximumSwathHeightY[],
unsigned int MaximumSwathHeightC[],
unsigned int swath_width_luma_ub[], // per-pipe
unsigned int swath_width_chroma_ub[]) // per-pipe
{
unsigned int k, j;
enum odm_combine_mode MainSurfaceODMMode;
unsigned int surface_width_ub_l;
unsigned int surface_height_ub_l;
unsigned int surface_width_ub_c = 0;
unsigned int surface_height_ub_c = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
dml_print("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, NumberOfActiveSurfaces);
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!IsVertical(SourceRotation[k]))
SwathWidthdoubleDPPY[k] = ViewportWidth[k];
else
SwathWidthdoubleDPPY[k] = ViewportHeight[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d ViewportWidth=%d\n", __func__, k, ViewportWidth[k]);
dml_print("DML::%s: k=%d ViewportHeight=%d\n", __func__, k, ViewportHeight[k]);
#endif
MainSurfaceODMMode = ODMMode[k];
for (j = 0; j < NumberOfActiveSurfaces; ++j) {
if (BlendingAndTiming[k] == j)
MainSurfaceODMMode = ODMMode[j];
}
if (ForceSingleDPP) {
SwathWidthY[k] = SwathWidthdoubleDPPY[k];
} else {
if (MainSurfaceODMMode == dm_odm_combine_mode_4to1) {
SwathWidthY[k] = dml_min(SwathWidthdoubleDPPY[k],
dml_round(HActive[k] / 4.0 * HRatio[k]));
} else if (MainSurfaceODMMode == dm_odm_combine_mode_2to1) {
SwathWidthY[k] = dml_min(SwathWidthdoubleDPPY[k],
dml_round(HActive[k] / 2.0 * HRatio[k]));
} else if (DPPPerSurface[k] == 2) {
SwathWidthY[k] = SwathWidthdoubleDPPY[k] / 2;
} else {
SwathWidthY[k] = SwathWidthdoubleDPPY[k];
}
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d HActive=%d\n", __func__, k, HActive[k]);
dml_print("DML::%s: k=%d HRatio=%f\n", __func__, k, HRatio[k]);
dml_print("DML::%s: k=%d MainSurfaceODMMode=%d\n", __func__, k, MainSurfaceODMMode);
dml_print("DML::%s: k=%d SwathWidthdoubleDPPY=%d\n", __func__, k, SwathWidthdoubleDPPY[k]);
dml_print("DML::%s: k=%d SwathWidthY=%d\n", __func__, k, SwathWidthY[k]);
#endif
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12) {
SwathWidthC[k] = SwathWidthY[k] / 2;
SwathWidthdoubleDPPC[k] = SwathWidthdoubleDPPY[k] / 2;
} else {
SwathWidthC[k] = SwathWidthY[k];
SwathWidthdoubleDPPC[k] = SwathWidthdoubleDPPY[k];
}
if (ForceSingleDPP == true) {
SwathWidthY[k] = SwathWidthdoubleDPPY[k];
SwathWidthC[k] = SwathWidthdoubleDPPC[k];
}
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
if (!IsVertical(SourceRotation[k])) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_luma_ub[k] = dml_min(surface_width_ub_l,
dml_floor(ViewportXStart[k] +
SwathWidthY[k] +
Read256BytesBlockWidthY[k] - 1,
Read256BytesBlockWidthY[k]) -
dml_floor(ViewportXStart[k],
Read256BytesBlockWidthY[k]));
} else {
swath_width_luma_ub[k] = dml_min(surface_width_ub_l,
dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockWidthY[k]) +
Read256BytesBlockWidthY[k]);
}
if (BytePerPixC[k] > 0) {
surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c,
dml_floor(ViewportXStartC[k] + SwathWidthC[k] +
Read256BytesBlockWidthC[k] - 1,
Read256BytesBlockWidthC[k]) -
dml_floor(ViewportXStartC[k],
Read256BytesBlockWidthC[k]));
} else {
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c,
dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockWidthC[k]) +
Read256BytesBlockWidthC[k]);
}
} else {
swath_width_chroma_ub[k] = 0;
}
} else {
MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, dml_floor(ViewportYStart[k] +
SwathWidthY[k] + Read256BytesBlockHeightY[k] - 1,
Read256BytesBlockHeightY[k]) -
dml_floor(ViewportYStart[k], Read256BytesBlockHeightY[k]));
} else {
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
}
if (BytePerPixC[k] > 0) {
surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c,
dml_floor(ViewportYStartC[k] + SwathWidthC[k] +
Read256BytesBlockHeightC[k] - 1,
Read256BytesBlockHeightC[k]) -
dml_floor(ViewportYStartC[k],
Read256BytesBlockHeightC[k]));
} else {
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c,
dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) +
Read256BytesBlockHeightC[k]);
}
} else {
swath_width_chroma_ub[k] = 0;
}
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d swath_width_luma_ub=%0d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%d swath_width_chroma_ub=%0d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%d MaximumSwathHeightY=%0d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%d MaximumSwathHeightC=%0d\n", __func__, k, MaximumSwathHeightC[k]);
#endif
}
} // CalculateSwathWidth
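/*
 * Illustrative example (hypothetical surface): dm_444_32, no rotation, not
 * viewport-stationary, SurfaceWidthY = HActive = 1920, HRatio = 1, ODM 2:1.
 * The per-pipe SwathWidthY is 960 and Read256BytesBlockWidthY is 8, so
 * swath_width_luma_ub = min(1920, ceil(959, 8) + 8) = 968, i.e. the swath is
 * padded out by one extra 256-byte block column.
 */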
bool dml32_UnboundedRequest(enum unbounded_requesting_policy UseUnboundedRequestingFinal,
unsigned int TotalNumberOfActiveDPP,
bool NoChroma,
enum output_encoder_class Output,
enum dm_swizzle_mode SurfaceTiling,
bool CompBufReservedSpaceNeedAdjustment,
bool DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment)
{
bool ret_val = false;
ret_val = (UseUnboundedRequestingFinal != dm_unbounded_requesting_disable &&
TotalNumberOfActiveDPP == 1 && NoChroma);
if (UseUnboundedRequestingFinal == dm_unbounded_requesting_edp_only && Output != dm_edp)
ret_val = false;
if (SurfaceTiling == dm_sw_linear)
ret_val = false;
if (CompBufReservedSpaceNeedAdjustment == 1 && DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment)
ret_val = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, CompBufReservedSpaceNeedAdjustment);
dml_print("DML::%s: DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment = %d\n", __func__, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml_print("DML::%s: ret_val = %d\n", __func__, ret_val);
#endif
return (ret_val);
}
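/*
 * Illustrative example: a single luma-only surface on one DPP with
 * dm_sw_64kb_r_x tiling and a policy other than dm_unbounded_requesting_disable
 * returns true; switching the surface to dm_sw_linear, adding a second active
 * DPP, or (with the edp-only policy) driving a non-eDP output returns false.
 */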
void dml32_CalculateDETBufferSize(
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
bool ForceSingleDPP,
unsigned int NumberOfActiveSurfaces,
bool UnboundedRequestEnabled,
unsigned int nomDETInKByte,
unsigned int MaxTotalDETInKByte,
unsigned int ConfigReturnBufferSizeInKByte,
unsigned int MinCompressedBufferSizeInKByte,
unsigned int CompressedBufferSegmentSizeInkByteFinal,
enum source_format_class SourcePixelFormat[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
unsigned int RoundedUpMaxSwathSizeBytesY[],
unsigned int RoundedUpMaxSwathSizeBytesC[],
unsigned int DPPPerSurface[],
/* Output */
unsigned int DETBufferSizeInKByte[],
unsigned int *CompressedBufferSizeInkByte)
{
unsigned int DETBufferSizePoolInKByte;
unsigned int NextDETBufferPieceInKByte;
bool DETPieceAssignedToThisSurfaceAlready[DC__NUM_DPP__MAX];
bool NextPotentialSurfaceToAssignDETPieceFound;
unsigned int NextSurfaceToAssignDETPiece;
double TotalBandwidth;
double BandwidthOfSurfacesNotAssignedDETPiece;
unsigned int max_minDET;
unsigned int minDET;
unsigned int minDET_pipe;
unsigned int j, k;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, NumberOfActiveSurfaces);
dml_print("DML::%s: UnboundedRequestEnabled = %d\n", __func__, UnboundedRequestEnabled);
dml_print("DML::%s: MaxTotalDETInKByte = %d\n", __func__, MaxTotalDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: MinCompressedBufferSizeInKByte = %d\n", __func__, MinCompressedBufferSizeInKByte);
dml_print("DML::%s: CompressedBufferSegmentSizeInkByteFinal = %d\n", __func__,
CompressedBufferSegmentSizeInkByteFinal);
#endif
// Note: Will use default det size if that fits 2 swaths
if (UnboundedRequestEnabled) {
if (DETSizeOverride[0] > 0) {
DETBufferSizeInKByte[0] = DETSizeOverride[0];
} else {
DETBufferSizeInKByte[0] = dml_max(nomDETInKByte, dml_ceil(2.0 *
((double) RoundedUpMaxSwathSizeBytesY[0] +
(double) RoundedUpMaxSwathSizeBytesC[0]) / 1024.0, 64.0));
}
*CompressedBufferSizeInkByte = ConfigReturnBufferSizeInKByte - DETBufferSizeInKByte[0];
} else {
DETBufferSizePoolInKByte = MaxTotalDETInKByte;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
DETBufferSizeInKByte[k] = nomDETInKByte;
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12) {
max_minDET = nomDETInKByte - 64;
} else {
max_minDET = nomDETInKByte;
}
minDET = 128;
minDET_pipe = 0;
// add DET resource until can hold 2 full swaths
while (minDET <= max_minDET && minDET_pipe == 0) {
if (2.0 * ((double) RoundedUpMaxSwathSizeBytesY[k] +
(double) RoundedUpMaxSwathSizeBytesC[k]) / 1024.0 <= minDET)
minDET_pipe = minDET;
minDET = minDET + 64;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d minDET = %d\n", __func__, k, minDET);
dml_print("DML::%s: k=%0d max_minDET = %d\n", __func__, k, max_minDET);
dml_print("DML::%s: k=%0d minDET_pipe = %d\n", __func__, k, minDET_pipe);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (minDET_pipe == 0) {
minDET_pipe = dml_max(128, dml_ceil(((double)RoundedUpMaxSwathSizeBytesY[k] +
(double)RoundedUpMaxSwathSizeBytesC[k]) / 1024.0, 64));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d minDET_pipe = %d (assume each plane take half DET)\n",
__func__, k, minDET_pipe);
#endif
}
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) {
DETBufferSizeInKByte[k] = 0;
} else if (DETSizeOverride[k] > 0) {
DETBufferSizeInKByte[k] = DETSizeOverride[k];
DETBufferSizePoolInKByte = DETBufferSizePoolInKByte -
(ForceSingleDPP ? 1 : DPPPerSurface[k]) * DETSizeOverride[k];
} else if ((ForceSingleDPP ? 1 : DPPPerSurface[k]) * minDET_pipe <= DETBufferSizePoolInKByte) {
DETBufferSizeInKByte[k] = minDET_pipe;
DETBufferSizePoolInKByte = DETBufferSizePoolInKByte -
(ForceSingleDPP ? 1 : DPPPerSurface[k]) * minDET_pipe;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d DETSizeOverride = %d\n", __func__, k, DETSizeOverride[k]);
dml_print("DML::%s: k=%d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: DETBufferSizePoolInKByte = %d\n", __func__, DETBufferSizePoolInKByte);
#endif
}
TotalBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)
TotalBandwidth = TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (uint k = 0; k < NumberOfActiveSurfaces; ++k)
dml_print("DML::%s: k=%d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
dml_print("DML::%s: TotalBandwidth = %f\n", __func__, TotalBandwidth);
#endif
BandwidthOfSurfacesNotAssignedDETPiece = TotalBandwidth;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) {
DETPieceAssignedToThisSurfaceAlready[k] = true;
} else if (DETSizeOverride[k] > 0 || (((double) (ForceSingleDPP ? 1 : DPPPerSurface[k]) *
(double) DETBufferSizeInKByte[k] / (double) MaxTotalDETInKByte) >=
((ReadBandwidthLuma[k] + ReadBandwidthChroma[k]) / TotalBandwidth))) {
DETPieceAssignedToThisSurfaceAlready[k] = true;
BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece -
ReadBandwidthLuma[k] - ReadBandwidthChroma[k];
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d DETPieceAssignedToThisSurfaceAlready = %d\n", __func__, k,
DETPieceAssignedToThisSurfaceAlready[k]);
dml_print("DML::%s: k=%d BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k,
BandwidthOfSurfacesNotAssignedDETPiece);
#endif
}
for (j = 0; j < NumberOfActiveSurfaces; ++j) {
NextPotentialSurfaceToAssignDETPieceFound = false;
NextSurfaceToAssignDETPiece = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: j=%d k=%d, ReadBandwidthLuma[k] = %f\n", __func__, j, k,
ReadBandwidthLuma[k]);
dml_print("DML::%s: j=%d k=%d, ReadBandwidthChroma[k] = %f\n", __func__, j, k,
ReadBandwidthChroma[k]);
dml_print("DML::%s: j=%d k=%d, ReadBandwidthLuma[Next] = %f\n", __func__, j, k,
ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
dml_print("DML::%s: j=%d k=%d, ReadBandwidthChroma[Next] = %f\n", __func__, j, k,
ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
dml_print("DML::%s: j=%d k=%d, NextSurfaceToAssignDETPiece = %d\n", __func__, j, k,
NextSurfaceToAssignDETPiece);
#endif
if (!DETPieceAssignedToThisSurfaceAlready[k] &&
(!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] <
ReadBandwidthLuma[NextSurfaceToAssignDETPiece] +
ReadBandwidthChroma[NextSurfaceToAssignDETPiece])) {
NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: j=%d k=%d, DETPieceAssignedToThisSurfaceAlready = %d\n",
__func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
dml_print("DML::%s: j=%d k=%d, NextPotentialSurfaceToAssignDETPieceFound = %d\n",
__func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
#endif
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
// Note: To show the banker's rounding behavior in VBA and also the fact
// that the DET buffer size varies due to precision issue
//
//double tmp1 = ((double) DETBufferSizePoolInKByte *
// (ReadBandwidthLuma[NextSurfaceToAssignDETPiece] +
// ReadBandwidthChroma[NextSurfaceToAssignDETPiece]) /
// BandwidthOfSurfacesNotAssignedDETPiece /
// ((ForceSingleDPP ? 1 : DPPPerSurface[NextSurfaceToAssignDETPiece]) * 64.0));
//double tmp2 = dml_round((double) DETBufferSizePoolInKByte *
// (ReadBandwidthLuma[NextSurfaceToAssignDETPiece] +
// ReadBandwidthChroma[NextSurfaceToAssignDETPiece]) /
//BandwidthOfSurfacesNotAssignedDETPiece /
// ((ForceSingleDPP ? 1 : DPPPerSurface[NextSurfaceToAssignDETPiece]) * 64.0));
//
//dml_print("DML::%s: j=%d, tmp1 = %f\n", __func__, j, tmp1);
//dml_print("DML::%s: j=%d, tmp2 = %f\n", __func__, j, tmp2);
NextDETBufferPieceInKByte = dml_min(
dml_round((double) DETBufferSizePoolInKByte *
(ReadBandwidthLuma[NextSurfaceToAssignDETPiece] +
ReadBandwidthChroma[NextSurfaceToAssignDETPiece]) /
BandwidthOfSurfacesNotAssignedDETPiece /
((ForceSingleDPP ? 1 :
DPPPerSurface[NextSurfaceToAssignDETPiece]) * 64.0)) *
(ForceSingleDPP ? 1 :
DPPPerSurface[NextSurfaceToAssignDETPiece]) * 64.0,
dml_floor((double) DETBufferSizePoolInKByte,
(ForceSingleDPP ? 1 :
DPPPerSurface[NextSurfaceToAssignDETPiece]) * 64.0));
// Above calculation can assign the entire DET buffer allocation to a single pipe.
// We should limit the per-pipe DET size to the nominal / max per pipe.
if (NextDETBufferPieceInKByte > nomDETInKByte * (ForceSingleDPP ? 1 : DPPPerSurface[k])) {
if (DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] <
nomDETInKByte * (ForceSingleDPP ? 1 : DPPPerSurface[k])) {
NextDETBufferPieceInKByte = nomDETInKByte * (ForceSingleDPP ? 1 : DPPPerSurface[k]) -
DETBufferSizeInKByte[NextSurfaceToAssignDETPiece];
} else {
// Case where DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]
// already has the max per-pipe value
NextDETBufferPieceInKByte = 0;
}
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: j=%0d, DETBufferSizePoolInKByte = %d\n", __func__, j,
DETBufferSizePoolInKByte);
dml_print("DML::%s: j=%0d, NextSurfaceToAssignDETPiece = %d\n", __func__, j,
NextSurfaceToAssignDETPiece);
dml_print("DML::%s: j=%0d, ReadBandwidthLuma[%0d] = %f\n", __func__, j,
NextSurfaceToAssignDETPiece, ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
dml_print("DML::%s: j=%0d, ReadBandwidthChroma[%0d] = %f\n", __func__, j,
NextSurfaceToAssignDETPiece, ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
dml_print("DML::%s: j=%0d, BandwidthOfSurfacesNotAssignedDETPiece = %f\n",
__func__, j, BandwidthOfSurfacesNotAssignedDETPiece);
dml_print("DML::%s: j=%0d, NextDETBufferPieceInKByte = %d\n", __func__, j,
NextDETBufferPieceInKByte);
dml_print("DML::%s: j=%0d, DETBufferSizeInKByte[%0d] increases from %0d ",
__func__, j, NextSurfaceToAssignDETPiece,
DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
#endif
DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] =
DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]
+ NextDETBufferPieceInKByte
/ (ForceSingleDPP ? 1 : DPPPerSurface[NextSurfaceToAssignDETPiece]);
#ifdef __DML_VBA_DEBUG__
dml_print("to %0d\n", DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
#endif
DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[NextSurfaceToAssignDETPiece] = true;
BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece -
(ReadBandwidthLuma[NextSurfaceToAssignDETPiece] +
ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
}
}
*CompressedBufferSizeInkByte = MinCompressedBufferSizeInKByte;
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByteFinal / 64;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: --- After bandwidth adjustment ---\n", __func__);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, *CompressedBufferSizeInkByte);
for (uint k = 0; k < NumberOfActiveSurfaces; ++k) {
dml_print("DML::%s: k=%d DETBufferSizeInKByte = %d (TotalReadBandWidth=%f)\n",
__func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
#endif
} // CalculateDETBufferSize
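/*
 * Illustrative worked example (all values hypothetical): two non-phantom
 * surfaces on one DPP each, nomDETInKByte = 384, MaxTotalDETInKByte = 1152,
 * read bandwidths 1 and 3 GB/s, and swaths small enough that the two-swath
 * check settles on minDET_pipe = 192 for both. After the minimum allocation
 * each pipe holds 192 KB and the pool is 768 KB; neither surface meets the
 * proportional threshold (192/1152 is below both 1/4 and 3/4), so both are
 * assigned by bandwidth. The lower-bandwidth surface goes first and receives
 * round(768 * 1/4 / 64) * 64 = 192 KB (now 384 KB); the second surface would
 * receive 576 KB but is clamped to the per-pipe nominal, gaining only
 * 384 - 192 = 192 KB, and the remaining 384 KB of the pool stays unassigned.
 */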
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
unsigned int MaxNumDPP,
double PixelClock,
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
unsigned int *NumberOfDPP,
enum odm_combine_mode *ODMMode,
double *RequiredDISPCLKPerSurface)
{
double SurfaceRequiredDISPCLKWithoutODMCombine;
double SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
double SurfaceRequiredDISPCLKWithODMCombineFourToOne;
SurfaceRequiredDISPCLKWithoutODMCombine = dml32_CalculateRequiredDispclk(dm_odm_combine_mode_disabled,
PixelClock, DISPCLKDPPCLKDSCCLKDownSpreading, DISPCLKRampingMargin, DISPCLKDPPCLKVCOSpeed,
MaxDispclk);
SurfaceRequiredDISPCLKWithODMCombineTwoToOne = dml32_CalculateRequiredDispclk(dm_odm_combine_mode_2to1,
PixelClock, DISPCLKDPPCLKDSCCLKDownSpreading, DISPCLKRampingMargin, DISPCLKDPPCLKVCOSpeed,
MaxDispclk);
SurfaceRequiredDISPCLKWithODMCombineFourToOne = dml32_CalculateRequiredDispclk(dm_odm_combine_mode_4to1,
PixelClock, DISPCLKDPPCLKDSCCLKDownSpreading, DISPCLKRampingMargin, DISPCLKDPPCLKVCOSpeed,
MaxDispclk);
*TotalAvailablePipesSupport = true;
*ODMMode = dm_odm_combine_mode_disabled; // initialize as disable
if (ODMUse == dm_odm_combine_policy_none)
*ODMMode = dm_odm_combine_mode_disabled;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithoutODMCombine;
*NumberOfDPP = 0;
// FIXME check ODMUse == "" condition does it mean bypass or Gabriel means something like don't care??
// (ODMUse == "" || ODMUse == "CombineAsNeeded")
if (!(Output == dm_hdmi || Output == dm_dp || Output == dm_edp) && (ODMUse == dm_odm_combine_policy_4to1 ||
((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk ||
(DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))
|| NumberOfDSCSlices > 8)))) {
if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
*NumberOfDPP = 4;
} else {
*TotalAvailablePipesSupport = false;
}
} else if (Output != dm_hdmi && (ODMUse == dm_odm_combine_policy_2to1 ||
(((SurfaceRequiredDISPCLKWithoutODMCombine > StateDispclk &&
SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= StateDispclk) ||
(DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))
|| (NumberOfDSCSlices <= 8 && NumberOfDSCSlices > 4))))) {
if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
*NumberOfDPP = 2;
} else {
*TotalAvailablePipesSupport = false;
}
} else {
if (TotalNumberOfActiveDPP + 1 <= MaxNumDPP)
*NumberOfDPP = 1;
else
*TotalAvailablePipesSupport = false;
}
if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
ODMUse != dm_odm_combine_policy_4to1) {
if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
*ODMMode = dm_odm_combine_mode_disabled;
*NumberOfDPP = 0;
*TotalAvailablePipesSupport = false;
} else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
*ODMMode == dm_odm_combine_mode_4to1) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
*NumberOfDPP = 4;
} else {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
*NumberOfDPP = 2;
}
}
if (Output == dm_hdmi && OutFormat == dm_420 &&
HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
*ODMMode = dm_odm_combine_mode_disabled;
*NumberOfDPP = 0;
*TotalAvailablePipesSupport = false;
}
}
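/*
 * Illustrative example (hypothetical values): Output = dm_dp, DSC enabled,
 * HActive = 7680 and MaximumPixelsPerLinePerDSCUnit = 4096. The 4:1 branch
 * is skipped (it requires an output type outside {hdmi, dp, edp}), but since
 * HActive exceeds one DSC unit's line width the 2:1 branch is taken, so the
 * surface needs two pipes and the 2:1 DISPCLK requirement; if fewer than two
 * pipes remain, *TotalAvailablePipesSupport is cleared instead.
 */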
double dml32_CalculateRequiredDispclk(
enum odm_combine_mode ODMMode,
double PixelClock,
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
double MaxDispclk)
{
double RequiredDispclk = 0.;
double PixelClockAfterODM;
double DISPCLKWithRampingRoundedToDFSGranularity;
double DISPCLKWithoutRampingRoundedToDFSGranularity;
double MaxDispclkRoundedDownToDFSGranularity;
if (ODMMode == dm_odm_combine_mode_4to1)
PixelClockAfterODM = PixelClock / 4;
else if (ODMMode == dm_odm_combine_mode_2to1)
PixelClockAfterODM = PixelClock / 2;
else
PixelClockAfterODM = PixelClock;
DISPCLKWithRampingRoundedToDFSGranularity = dml32_RoundToDFSGranularity(
PixelClockAfterODM * (1 + DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + DISPCLKRampingMargin / 100), 1, DISPCLKDPPCLKVCOSpeed);
DISPCLKWithoutRampingRoundedToDFSGranularity = dml32_RoundToDFSGranularity(
PixelClockAfterODM * (1 + DISPCLKDPPCLKDSCCLKDownSpreading / 100), 1, DISPCLKDPPCLKVCOSpeed);
MaxDispclkRoundedDownToDFSGranularity = dml32_RoundToDFSGranularity(MaxDispclk, 0, DISPCLKDPPCLKVCOSpeed);
if (DISPCLKWithoutRampingRoundedToDFSGranularity > MaxDispclkRoundedDownToDFSGranularity)
RequiredDispclk = DISPCLKWithoutRampingRoundedToDFSGranularity;
else if (DISPCLKWithRampingRoundedToDFSGranularity > MaxDispclkRoundedDownToDFSGranularity)
RequiredDispclk = MaxDispclkRoundedDownToDFSGranularity;
else
RequiredDispclk = DISPCLKWithRampingRoundedToDFSGranularity;
return RequiredDispclk;
}
double dml32_RoundToDFSGranularity(double Clock, bool round_up, double VCOSpeed)
{
if (Clock <= 0.0)
return 0.0;
if (round_up)
return VCOSpeed * 4.0 / dml_floor(VCOSpeed * 4.0 / Clock, 1.0);
else
return VCOSpeed * 4.0 / dml_ceil(VCOSpeed * 4.0 / Clock, 1.0);
}
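/*
 * Illustrative worked example (hypothetical values): with
 * DISPCLKDPPCLKVCOSpeed = 3600 MHz the achievable DFS frequencies are
 * 4 * 3600 / N = 14400 / N MHz for integer dividers N. For Clock = 1300 MHz,
 * rounding up picks N = floor(14400/1300) = 11, i.e. ~1309.1 MHz, while
 * rounding down picks N = 12, i.e. 1200 MHz.
 */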
void dml32_CalculateOutputLink(
double PHYCLKPerState,
double PHYCLKD18PerState,
double PHYCLKD32PerState,
double Downspreading,
bool IsMainSurfaceUsingTheIndicatedTiming,
enum output_encoder_class Output,
enum output_format_class OutputFormat,
unsigned int HTotal,
unsigned int HActive,
double PixelClockBackEnd,
double ForcedOutputLinkBPP,
unsigned int DSCInputBitPerComponent,
unsigned int NumberOfDSCSlices,
double AudioSampleRate,
unsigned int AudioSampleLayout,
enum odm_combine_mode ODMModeNoDSC,
enum odm_combine_mode ODMModeDSC,
bool DSCEnable,
unsigned int OutputLinkDPLanes,
enum dm_output_link_dp_rate OutputLinkDPRate,
/* Output */
bool *RequiresDSC,
double *RequiresFEC,
double *OutBpp,
enum dm_output_type *OutputType,
enum dm_output_rate *OutputRate,
unsigned int *RequiredSlots)
{
bool LinkDSCEnable;
unsigned int dummy;
*RequiresDSC = false;
*RequiresFEC = false;
*OutBpp = 0;
*OutputType = dm_output_type_unknown;
*OutputRate = dm_output_rate_unknown;
if (IsMainSurfaceUsingTheIndicatedTiming) {
if (Output == dm_hdmi) {
*RequiresDSC = false;
*RequiresFEC = false;
*OutBpp = dml32_TruncToValidBPP(dml_min(600, PHYCLKPerState) * 10, 3, HTotal, HActive,
PixelClockBackEnd, ForcedOutputLinkBPP, false, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = "HDMI";
*OutputType = dm_output_type_hdmi;
} else if (Output == dm_dp || Output == dm_dp2p0 || Output == dm_edp) {
if (DSCEnable == true) {
*RequiresDSC = true;
LinkDSCEnable = true;
if (Output == dm_dp || Output == dm_dp2p0)
*RequiresFEC = true;
else
*RequiresFEC = false;
} else {
*RequiresDSC = false;
LinkDSCEnable = false;
if (Output == dm_dp2p0)
*RequiresFEC = true;
else
*RequiresFEC = false;
}
if (Output == dm_dp2p0) {
*OutBpp = 0;
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_uhbr10) &&
PHYCLKD32PerState >= 10000 / 32) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 10000,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && PHYCLKD32PerState < 13500 / 32 && DSCEnable == true &&
ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 10000,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " UHBR10";
*OutputType = dm_output_type_dp2p0;
*OutputRate = dm_output_rate_dp_rate_uhbr10;
}
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_uhbr13p5) &&
*OutBpp == 0 && PHYCLKD32PerState >= 13500 / 32) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 13500,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == true &&
ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 13500,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " UHBR13p5";
*OutputType = dm_output_type_dp2p0;
*OutputRate = dm_output_rate_dp_rate_uhbr13p5;
}
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_uhbr20) &&
*OutBpp == 0 && PHYCLKD32PerState >= 20000 / 32) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 20000,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && DSCEnable == true && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 20000,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " UHBR20";
*OutputType = dm_output_type_dp2p0;
*OutputRate = dm_output_rate_dp_rate_uhbr20;
}
} else {
*OutBpp = 0;
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_hbr) &&
PHYCLKPerState >= 270) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 2700,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && PHYCLKPerState < 540 && DSCEnable == true &&
ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
if (Output == dm_dp)
*RequiresFEC = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 2700,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " HBR";
*OutputType = (Output == dm_dp) ? dm_output_type_dp : dm_output_type_edp;
*OutputRate = dm_output_rate_dp_rate_hbr;
}
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_hbr2) &&
*OutBpp == 0 && PHYCLKPerState >= 540) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 5400,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && PHYCLKPerState < 810 && DSCEnable == true &&
ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
if (Output == dm_dp)
*RequiresFEC = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 5400,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " HBR2";
*OutputType = (Output == dm_dp) ? dm_output_type_dp : dm_output_type_edp;
*OutputRate = dm_output_rate_dp_rate_hbr2;
}
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_hbr3) && *OutBpp == 0 && PHYCLKPerState >= 810) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 8100,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices,
AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC,
RequiredSlots);
if (*OutBpp == 0 && DSCEnable == true && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
if (Output == dm_dp)
*RequiresFEC = true;
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 8100,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent,
NumberOfDSCSlices, AudioSampleRate, AudioSampleLayout,
ODMModeNoDSC, ODMModeDSC, RequiredSlots);
}
//OutputTypeAndRate = Output & " HBR3";
*OutputType = (Output == dm_dp) ? dm_output_type_dp : dm_output_type_edp;
*OutputRate = dm_output_rate_dp_rate_hbr3;
}
}
}
}
}
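/*
 * Illustrative note on the rate ladder above: for dm_dp2p0 the rates are
 * tried in order UHBR10 -> UHBR13.5 -> UHBR20, and for dm_dp/dm_edp in order
 * HBR -> HBR2 -> HBR3, each rung gated by the PHY clock (e.g. a 540 MHz
 * PHYCLKPerState reaches HBR and HBR2 but not HBR3). The first rung whose
 * dml32_TruncToValidBPP result is non-zero wins; if a rung yields zero and
 * the PHY cannot reach the next one, DSC is enabled (when available and no
 * link bpp is forced) and that same rung is retried.
 */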
void dml32_CalculateDPPCLK(
unsigned int NumberOfActiveSurfaces,
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKDPPCLKVCOSpeed,
double DPPCLKUsingSingleDPP[],
unsigned int DPPPerSurface[],
/* output */
double *GlobalDPPCLK,
double Dppclk[])
{
unsigned int k;
*GlobalDPPCLK = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
Dppclk[k] = DPPCLKUsingSingleDPP[k] / DPPPerSurface[k] * (1 + DISPCLKDPPCLKDSCCLKDownSpreading / 100);
*GlobalDPPCLK = dml_max(*GlobalDPPCLK, Dppclk[k]);
}
*GlobalDPPCLK = dml32_RoundToDFSGranularity(*GlobalDPPCLK, 1, DISPCLKDPPCLKVCOSpeed);
for (k = 0; k < NumberOfActiveSurfaces; ++k)
Dppclk[k] = *GlobalDPPCLK / 255 * dml_ceil(Dppclk[k] * 255.0 / *GlobalDPPCLK, 1.0);
}
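/*
 * Illustrative worked example (hypothetical values): two surfaces with
 * DPPCLKUsingSingleDPP = {800, 1000} MHz, DPPPerSurface = {2, 1}, 0.5%
 * downspread and a 3600 MHz VCO. Per-surface requests become 402 and
 * 1005 MHz, GlobalDPPCLK rounds up to 14400/14 ~= 1028.6 MHz, and the final
 * loop snaps each request up to a 1/255 fraction of the global clock:
 * ~403.4 MHz (100/255) and ~1008.4 MHz (250/255).
 */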
double dml32_TruncToValidBPP(
double LinkBitRate,
unsigned int Lanes,
unsigned int HTotal,
unsigned int HActive,
double PixelClock,
double DesiredBPP,
bool DSCEnable,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent,
unsigned int DSCSlices,
unsigned int AudioRate,
unsigned int AudioLayout,
enum odm_combine_mode ODMModeNoDSC,
enum odm_combine_mode ODMModeDSC,
/* Output */
unsigned int *RequiredSlots)
{
double MaxLinkBPP;
unsigned int MinDSCBPP;
double MaxDSCBPP;
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
if (Format == dm_420) {
NonDSCBPP0 = 12;
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
if (Output == dm_hdmi) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
NonDSCBPP2 = 24;
} else {
NonDSCBPP0 = 16;
NonDSCBPP1 = 20;
NonDSCBPP2 = 24;
}
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
} else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
}
if (Output == dm_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128 / 132 * 383 / 384 * 65536 / 65540;
} else if (DSCEnable && Output == dm_dp) {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock * (1 - 2.4 / 100);
} else {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock;
}
if (DSCEnable) {
if (ODMModeDSC == dm_odm_combine_mode_4to1)
MaxLinkBPP = dml_min(MaxLinkBPP, 16);
else if (ODMModeDSC == dm_odm_combine_mode_2to1)
MaxLinkBPP = dml_min(MaxLinkBPP, 32);
else if (ODMModeDSC == dm_odm_split_mode_1to2)
MaxLinkBPP = 2 * MaxLinkBPP;
} else {
if (ODMModeNoDSC == dm_odm_combine_mode_4to1)
MaxLinkBPP = dml_min(MaxLinkBPP, 16);
else if (ODMModeNoDSC == dm_odm_combine_mode_2to1)
MaxLinkBPP = dml_min(MaxLinkBPP, 32);
else if (ODMModeNoDSC == dm_odm_split_mode_1to2)
MaxLinkBPP = 2 * MaxLinkBPP;
}
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP)
return BPP_INVALID;
else if (MaxLinkBPP >= MaxDSCBPP)
return MaxDSCBPP;
else
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
} else {
if (MaxLinkBPP >= NonDSCBPP2)
return NonDSCBPP2;
else if (MaxLinkBPP >= NonDSCBPP1)
return NonDSCBPP1;
else if (MaxLinkBPP >= NonDSCBPP0)
return 16.0;
else
return BPP_INVALID;
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 ||
DesiredBPP <= NonDSCBPP0)) ||
(DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP)))
return BPP_INVALID;
else
return DesiredBPP;
}
*RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
return BPP_INVALID;
} // TruncToValidBPP
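/*
 * Minimum DTBCLK needed to drive the output: without DSC it scales with the
 * pixel clock and output bpp (with a floor of 25); with DSC it must keep up
 * with the pixel word rate and with the average and active-portion compressed
 * tribyte rates, each divided by 4, with a 0.2% margin on top.
 */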
double dml32_RequiredDTBCLK(
bool DSCEnable,
double PixelClock,
enum output_format_class OutputFormat,
double OutputBpp,
unsigned int DSCSlices,
unsigned int HTotal,
unsigned int HActive,
unsigned int AudioRate,
unsigned int AudioLayout)
{
double PixelWordRate;
double HCActive;
double HCBlank;
double AverageTribyteRate;
double HActiveTribyteRate;
if (DSCEnable != true)
return dml_max(PixelClock / 4.0 * OutputBpp / 24.0, 25.0);
PixelWordRate = PixelClock / (OutputFormat == dm_444 ? 1 : 2);
HCActive = dml_ceil(DSCSlices * dml_ceil(OutputBpp *
dml_ceil(HActive / DSCSlices, 1) / 8.0, 1) / 3.0, 1);
HCBlank = 64 + 32 *
dml_ceil(AudioRate * (AudioLayout == 1 ? 1 : 0.25) * HTotal / (PixelClock * 1000), 1);
AverageTribyteRate = PixelWordRate * (HCActive + HCBlank) / HTotal;
HActiveTribyteRate = PixelWordRate * HCActive / HActive;
return dml_max4(PixelWordRate / 4.0, AverageTribyteRate / 4.0, HActiveTribyteRate / 4.0, 25.0) * 1.002;
}
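/*
 * DSC delay in pixels for the pipe: the per-slice DSCCE delay plus the DSC
 * wrapper delay, multiplied by the ODM combine factor, extended by horizontal
 * blanking for each line the delay spans, converted from the back-end to the
 * front-end pixel clock domain, and finally scaled by the dsc_delay_factor_wa
 * workaround factor.
 */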
unsigned int dml32_DSCDelayRequirement(bool DSCEnabled,
enum odm_combine_mode ODMMode,
unsigned int DSCInputBitPerComponent,
double OutputBpp,
unsigned int HActive,
unsigned int HTotal,
unsigned int NumberOfDSCSlices,
enum output_format_class OutputFormat,
enum output_encoder_class Output,
double PixelClock,
double PixelClockBackEnd,
double dsc_delay_factor_wa)
{
unsigned int DSCDelayRequirement_val;
if (DSCEnabled == true && OutputBpp != 0) {
if (ODMMode == dm_odm_combine_mode_4to1) {
DSCDelayRequirement_val = 4 * (dml32_dscceComputeDelay(DSCInputBitPerComponent, OutputBpp,
dml_ceil(HActive / NumberOfDSCSlices, 1), NumberOfDSCSlices / 4,
OutputFormat, Output) + dml32_dscComputeDelay(OutputFormat, Output));
} else if (ODMMode == dm_odm_combine_mode_2to1) {
DSCDelayRequirement_val = 2 * (dml32_dscceComputeDelay(DSCInputBitPerComponent, OutputBpp,
dml_ceil(HActive / NumberOfDSCSlices, 1), NumberOfDSCSlices / 2,
OutputFormat, Output) + dml32_dscComputeDelay(OutputFormat, Output));
} else {
DSCDelayRequirement_val = dml32_dscceComputeDelay(DSCInputBitPerComponent, OutputBpp,
dml_ceil(HActive / NumberOfDSCSlices, 1), NumberOfDSCSlices,
OutputFormat, Output) + dml32_dscComputeDelay(OutputFormat, Output);
}
DSCDelayRequirement_val = DSCDelayRequirement_val + (HTotal - HActive) *
dml_ceil((double)DSCDelayRequirement_val / HActive, 1);
DSCDelayRequirement_val = DSCDelayRequirement_val * PixelClock / PixelClockBackEnd;
} else {
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSCEnabled = %d\n", __func__, DSCEnabled);
dml_print("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
dml_print("DML::%s: HActive = %d\n", __func__, HActive);
dml_print("DML::%s: OutputFormat = %d\n", __func__, OutputFormat);
dml_print("DML::%s: DSCInputBitPerComponent = %d\n", __func__, DSCInputBitPerComponent);
dml_print("DML::%s: NumberOfDSCSlices = %d\n", __func__, NumberOfDSCSlices);
dml_print("DML::%s: DSCDelayRequirement_val = %d\n", __func__, DSCDelayRequirement_val);
#endif
return dml_ceil(DSCDelayRequirement_val * dsc_delay_factor_wa, 1);
}
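/*
 * Per-surface footprint in MALL, in bytes: the viewport (or the full surface,
 * whichever is smaller) rounded up to the detail-request block size for luma
 * and chroma, plus the DCC metadata surface (at a 1/256 ratio) and a 64KB
 * allowance when DCC is enabled. Stationary viewports are aligned to their
 * actual start position instead of worst-case rounding. The static-screen and
 * SubVP totals are then compared separately against the MALL allocation.
 */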
void dml32_CalculateSurfaceSizeInMall(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCN,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
bool DCCEnable[],
bool ViewportStationary[],
unsigned int ViewportXStartY[],
unsigned int ViewportYStartY[],
unsigned int ViewportXStartC[],
unsigned int ViewportYStartC[],
unsigned int ViewportWidthY[],
unsigned int ViewportHeightY[],
unsigned int BytesPerPixelY[],
unsigned int ViewportWidthC[],
unsigned int ViewportHeightC[],
unsigned int BytesPerPixelC[],
unsigned int SurfaceWidthY[],
unsigned int SurfaceWidthC[],
unsigned int SurfaceHeightY[],
unsigned int SurfaceHeightC[],
unsigned int Read256BytesBlockWidthY[],
unsigned int Read256BytesBlockWidthC[],
unsigned int Read256BytesBlockHeightY[],
unsigned int Read256BytesBlockHeightC[],
unsigned int ReadBlockWidthY[],
unsigned int ReadBlockWidthC[],
unsigned int ReadBlockHeightY[],
unsigned int ReadBlockHeightC[],
unsigned int DCCMetaPitchY[],
unsigned int DCCMetaPitchC[],
/* Output */
unsigned int SurfaceSizeInMALL[],
bool *ExceededMALLSize)
{
unsigned int k;
unsigned int TotalSurfaceSizeInMALLForSS = 0;
unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (ViewportStationary[k]) {
SurfaceSizeInMALL[k] = dml_min(dml_ceil(SurfaceWidthY[k], ReadBlockWidthY[k]),
dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + ReadBlockWidthY[k] - 1,
ReadBlockWidthY[k]) - dml_floor(ViewportXStartY[k],
ReadBlockWidthY[k])) * dml_min(dml_ceil(SurfaceHeightY[k],
ReadBlockHeightY[k]), dml_floor(ViewportYStartY[k] +
ViewportHeightY[k] + ReadBlockHeightY[k] - 1, ReadBlockHeightY[k]) -
dml_floor(ViewportYStartY[k], ReadBlockHeightY[k])) * BytesPerPixelY[k];
if (ReadBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
dml_min(dml_ceil(SurfaceWidthC[k], ReadBlockWidthC[k]),
dml_floor(ViewportXStartC[k] + ViewportWidthC[k] +
ReadBlockWidthC[k] - 1, ReadBlockWidthC[k]) -
dml_floor(ViewportXStartC[k], ReadBlockWidthC[k])) *
dml_min(dml_ceil(SurfaceHeightC[k], ReadBlockHeightC[k]),
dml_floor(ViewportYStartC[k] + ViewportHeightC[k] +
ReadBlockHeightC[k] - 1, ReadBlockHeightC[k]) -
dml_floor(ViewportYStartC[k], ReadBlockHeightC[k])) *
BytesPerPixelC[k];
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
(dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]),
dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k])
- dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k]))
* dml_min(dml_ceil(SurfaceHeightY[k], 8 *
Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] +
ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1, 8 *
Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 *
Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
dml_min(dml_ceil(DCCMetaPitchC[k], 8 *
Read256BytesBlockWidthC[k]),
dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8
* Read256BytesBlockWidthC[k] - 1, 8 *
Read256BytesBlockWidthC[k]) -
dml_floor(ViewportXStartC[k], 8 *
Read256BytesBlockWidthC[k])) *
dml_min(dml_ceil(SurfaceHeightC[k], 8 *
Read256BytesBlockHeightC[k]),
dml_floor(ViewportYStartC[k] + ViewportHeightC[k] +
8 * Read256BytesBlockHeightC[k] - 1, 8 *
Read256BytesBlockHeightC[k]) -
dml_floor(ViewportYStartC[k], 8 *
Read256BytesBlockHeightC[k])) *
BytesPerPixelC[k] / 256;
}
}
} else {
SurfaceSizeInMALL[k] = dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] +
ReadBlockWidthY[k] - 1), ReadBlockWidthY[k]) *
dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] +
ReadBlockHeightY[k] - 1), ReadBlockHeightY[k]) *
BytesPerPixelY[k];
if (ReadBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] +
ReadBlockWidthC[k] - 1), ReadBlockWidthC[k]) *
dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] +
ReadBlockHeightC[k] - 1), ReadBlockHeightC[k]) *
BytesPerPixelC[k];
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
(dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1), 8 *
Read256BytesBlockWidthY[k]) *
dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 *
Read256BytesBlockHeightY[k] - 1), 8 *
Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 *
Read256BytesBlockWidthC[k] - 1), 8 *
Read256BytesBlockWidthC[k]) *
dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 *
Read256BytesBlockHeightC[k] - 1), 8 *
Read256BytesBlockHeightC[k]) *
BytesPerPixelC[k] / 256;
}
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
/* SS and SubVP counted separately as they are never used at the same time */
if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe)
TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k];
else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k];
}
*ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) ||
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
} // CalculateSurfaceSizeInMall
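/*
 * Per-surface GPUVM/DCC row parameters: PTE buffer partitioning between luma
 * and chroma, PTE and meta row sizes and bandwidths, prefetch source lines,
 * and whether the PTE and DCC-meta buffers are exceeded. Surfaces forced to
 * it, MALL users (static screen, SubVP/phantom) and rotated surfaces with
 * pages larger than 64KB switch to the one-row-per-frame PTE layout.
 */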
void dml32_CalculateVMRowAndSwath(
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
unsigned int PTEBufferSizeInRequestsLuma,
unsigned int PTEBufferSizeInRequestsChroma,
unsigned int DCCMetaBufferSizeBytes,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int MALLAllocatedForDCN,
double SwathWidthY[],
double SwathWidthC[],
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMaxPageTableLevels,
unsigned int GPUVMMinPageSizeKBytes[],
unsigned int HostVMMinPageSize,
/* Output */
bool PTEBufferSizeNotExceeded[],
bool DCCMetaBufferSizeNotExceeded[],
unsigned int dpte_row_width_luma_ub[],
unsigned int dpte_row_width_chroma_ub[],
unsigned int dpte_row_height_luma[],
unsigned int dpte_row_height_chroma[],
unsigned int dpte_row_height_linear_luma[], // VBA_DELTA
unsigned int dpte_row_height_linear_chroma[], // VBA_DELTA
unsigned int meta_req_width[],
unsigned int meta_req_width_chroma[],
unsigned int meta_req_height[],
unsigned int meta_req_height_chroma[],
unsigned int meta_row_width[],
unsigned int meta_row_width_chroma[],
unsigned int meta_row_height[],
unsigned int meta_row_height_chroma[],
unsigned int vm_group_bytes[],
unsigned int dpte_group_bytes[],
unsigned int PixelPTEReqWidthY[],
unsigned int PixelPTEReqHeightY[],
unsigned int PTERequestSizeY[],
unsigned int PixelPTEReqWidthC[],
unsigned int PixelPTEReqHeightC[],
unsigned int PTERequestSizeC[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int meta_pte_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
unsigned int meta_pte_bytes_per_frame_ub_c[],
double PrefetchSourceLinesY[],
double PrefetchSourceLinesC[],
double VInitPreFillY[],
double VInitPreFillC[],
unsigned int MaxNumSwathY[],
unsigned int MaxNumSwathC[],
double meta_row_bw[],
double dpte_row_bw[],
double PixelPTEBytesPerRow[],
double PDEAndMetaPTEBytesFrame[],
double MetaRowByte[],
bool use_one_row_for_frame[],
bool use_one_row_for_frame_flip[],
bool UsesMALLForStaticScreen[],
bool PTE_BUFFER_MODE[],
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
vm_group_bytes[k] = 512;
dpte_group_bytes[k] = 512;
} else if (GPUVMEnable == true) {
vm_group_bytes[k] = 2048;
if (GPUVMMinPageSizeKBytes[k] >= 64 && IsVertical(myPipe[k].SourceRotation))
dpte_group_bytes[k] = 512;
else
dpte_group_bytes[k] = 2048;
} else {
vm_group_bytes[k] = 0;
dpte_group_bytes[k] = 0;
}
if (myPipe[k].SourcePixelFormat == dm_420_8 || myPipe[k].SourcePixelFormat == dm_420_10 ||
myPipe[k].SourcePixelFormat == dm_420_12 ||
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
myPipe[k].BlockHeight256BytesC,
myPipe[k].BlockWidth256BytesC,
myPipe[k].SourcePixelFormat,
myPipe[k].SurfaceTiling,
myPipe[k].BytePerPixelC,
myPipe[k].SourceRotation,
SwathWidthC[k],
myPipe[k].ViewportHeightChroma,
myPipe[k].ViewportXStartC,
myPipe[k].ViewportYStartC,
GPUVMEnable,
HostVMEnable,
HostVMMaxNonCachedPageTableLevels,
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
&MetaRowByteC[k],
&PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
&PixelPTEBytesPerRowC_one_row_per_frame[k],
&dpte_row_width_chroma_ub_one_row_per_frame[k],
&dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
&meta_row_height_chroma[k],
&PixelPTEReqWidthC[k],
&PixelPTEReqHeightC[k],
&PTERequestSizeC[k],
&dpde0_bytes_per_frame_ub_c[k],
&meta_pte_bytes_per_frame_ub_c[k]);
PrefetchSourceLinesC[k] = dml32_CalculatePrefetchSourceLines(
myPipe[k].VRatioChroma,
myPipe[k].VTapsChroma,
myPipe[k].InterlaceEnable,
myPipe[k].ProgressiveToInterlaceUnitInOPP,
myPipe[k].SwathHeightC,
myPipe[k].SourceRotation,
myPipe[k].ViewportStationary,
SwathWidthC[k],
myPipe[k].ViewportHeightChroma,
myPipe[k].ViewportXStartC,
myPipe[k].ViewportYStartC,
/* Output */
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
PTEBufferSizeInRequestsForChroma[k] = 0;
PixelPTEBytesPerRowC[k] = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
dpte_row_height_chroma_one_row_per_frame[k] = 0;
dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
myPipe[k].BlockHeight256BytesY,
myPipe[k].BlockWidth256BytesY,
myPipe[k].SourcePixelFormat,
myPipe[k].SurfaceTiling,
myPipe[k].BytePerPixelY,
myPipe[k].SourceRotation,
SwathWidthY[k],
myPipe[k].ViewportHeight,
myPipe[k].ViewportXStart,
myPipe[k].ViewportYStart,
GPUVMEnable,
HostVMEnable,
HostVMMaxNonCachedPageTableLevels,
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
&MetaRowByteY[k],
&PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
&PixelPTEBytesPerRowY_one_row_per_frame[k],
&dpte_row_width_luma_ub_one_row_per_frame[k],
&dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
&meta_row_height[k],
&PixelPTEReqWidthY[k],
&PixelPTEReqHeightY[k],
&PTERequestSizeY[k],
&dpde0_bytes_per_frame_ub_l[k],
&meta_pte_bytes_per_frame_ub_l[k]);
PrefetchSourceLinesY[k] = dml32_CalculatePrefetchSourceLines(
myPipe[k].VRatio,
myPipe[k].VTaps,
myPipe[k].InterlaceEnable,
myPipe[k].ProgressiveToInterlaceUnitInOPP,
myPipe[k].SwathHeightY,
myPipe[k].SourceRotation,
myPipe[k].ViewportStationary,
SwathWidthY[k],
myPipe[k].ViewportHeight,
myPipe[k].ViewportXStart,
myPipe[k].ViewportYStart,
/* Output */
&VInitPreFillY[k],
&MaxNumSwathY[k]);
PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
PTEBufferSizeInRequestsForLuma[k] &&
PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
NumberOfActiveSurfaces,
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
one_row_per_frame_fits_in_buffer,
/* Output */
UsesMALLForStaticScreen); // boolean
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
PTE_BUFFER_MODE[k] = myPipe[k].FORCE_ONE_ROW_FOR_FRAME || UsesMALLForStaticScreen[k] ||
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport) ||
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ||
(GPUVMMinPageSizeKBytes[k] > 64);
BIGK_FRAGMENT_SIZE[k] = dml_log2(GPUVMMinPageSizeKBytes[k] * 1024) - 12;
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, SurfaceSizeInMALL = %d\n", __func__, k, SurfaceSizeInMALL[k]);
dml_print("DML::%s: k=%d, UsesMALLForStaticScreen = %d\n", __func__, k, UsesMALLForStaticScreen[k]);
#endif
use_one_row_for_frame[k] = myPipe[k].FORCE_ONE_ROW_FOR_FRAME || UsesMALLForStaticScreen[k] ||
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport) ||
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ||
(GPUVMMinPageSizeKBytes[k] > 64 && IsVertical(myPipe[k].SourceRotation));
use_one_row_for_frame_flip[k] = use_one_row_for_frame[k] &&
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
DCCMetaBufferSizeNotExceeded[k] = true;
else
DCCMetaBufferSizeNotExceeded[k] = false;
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
dml32_CalculateRowBandwidth(
GPUVMEnable,
myPipe[k].SourcePixelFormat,
myPipe[k].VRatio,
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
PixelPTEBytesPerRowY[k],
PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
/* Output */
&meta_row_bw[k],
&dpte_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, use_one_row_for_frame = %d\n", __func__, k, use_one_row_for_frame[k]);
dml_print("DML::%s: k=%d, use_one_row_for_frame_flip = %d\n",
__func__, k, use_one_row_for_frame_flip[k]);
dml_print("DML::%s: k=%d, UseMALLForPStateChange = %d\n",
__func__, k, UseMALLForPStateChange[k]);
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
dml_print("DML::%s: k=%d, PTE_BUFFER_MODE = %d\n", __func__, k, PTE_BUFFER_MODE[k]);
dml_print("DML::%s: k=%d, BIGK_FRAGMENT_SIZE = %d\n", __func__, k, BIGK_FRAGMENT_SIZE[k]);
#endif
}
} // CalculateVMRowAndSwath
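/*
 * For one plane (luma or chroma): sizes of the PDE/meta-PTE frame-level
 * allocations, of one PTE row and one DCC meta row, plus the PTE request
 * geometry. Linear, horizontal and vertical (rotated) scan cases are handled
 * separately, and stationary viewports use exact start-aligned widths. The
 * return value is the total PDE plus meta-PTE bytes per frame.
 */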
unsigned int dml32_CalculateVMAndRowBytes(
bool ViewportStationary,
bool DCCEnable,
unsigned int NumberOfDPPs,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum dm_rotation_angle SourceRotation,
double SwathWidth,
unsigned int ViewportHeight,
unsigned int ViewportXStart,
unsigned int ViewportYStart,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMaxPageTableLevels,
unsigned int GPUVMMinPageSizeKBytes,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int MacroTileWidth,
unsigned int MacroTileHeight,
/* Output */
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
unsigned int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *dpte_row_height_linear,
unsigned int *PixelPTEBytesPerRow_one_row_per_frame,
unsigned int *dpte_row_width_ub_one_row_per_frame,
unsigned int *dpte_row_height_one_row_per_frame,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
unsigned int HostVMDynamicLevels = 0;
unsigned int MacroTileSizeBytes;
unsigned int vp_height_meta_ub;
unsigned int vp_height_dpte_ub;
unsigned int PixelPTEReqWidth_linear = 0; // VBA_DELTA. VBA doesn't calculate this
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048)
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576)
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
*MetaRequestHeight = 8 * BlockHeight256Bytes;
*MetaRequestWidth = 8 * BlockWidth256Bytes;
if (SurfaceTiling == dm_sw_linear) {
*meta_row_height = 32;
*meta_row_width = dml_floor(ViewportXStart + SwathWidth + *MetaRequestWidth - 1, *MetaRequestWidth)
- dml_floor(ViewportXStart, *MetaRequestWidth);
} else if (!IsVertical(SourceRotation)) {
*meta_row_height = *MetaRequestHeight;
if (ViewportStationary && NumberOfDPPs == 1) {
*meta_row_width = dml_floor(ViewportXStart + SwathWidth + *MetaRequestWidth - 1,
*MetaRequestWidth) - dml_floor(ViewportXStart, *MetaRequestWidth);
} else {
*meta_row_width = dml_ceil(SwathWidth - 1, *MetaRequestWidth) + *MetaRequestWidth;
}
*MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = *MetaRequestWidth;
if (ViewportStationary && NumberOfDPPs == 1) {
*meta_row_width = dml_floor(ViewportYStart + ViewportHeight + *MetaRequestHeight - 1,
*MetaRequestHeight) - dml_floor(ViewportYStart, *MetaRequestHeight);
} else {
*meta_row_width = dml_ceil(SwathWidth - 1, *MetaRequestHeight) + *MetaRequestHeight;
}
*MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
}
if (ViewportStationary && (NumberOfDPPs == 1 || !IsVertical(SourceRotation))) {
vp_height_meta_ub = dml_floor(ViewportYStart + ViewportHeight + 64 * BlockHeight256Bytes - 1,
64 * BlockHeight256Bytes) - dml_floor(ViewportYStart, 64 * BlockHeight256Bytes);
} else if (!IsVertical(SourceRotation)) {
vp_height_meta_ub = dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes;
} else {
vp_height_meta_ub = dml_ceil(SwathWidth - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes;
}
DCCMetaSurfaceBytes = DCCMetaPitch * vp_height_meta_ub * BytePerPixel / 256.0;
if (GPUVMEnable == true) {
*MetaPTEBytesFrame = (dml_ceil((double) (DCCMetaSurfaceBytes - 4.0 * 1024.0) /
(8 * 4.0 * 1024), 1) + 1) * 64;
MPDEBytesFrame = 128 * (GPUVMMaxPageTableLevels - 1);
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
if (DCCEnable != true) {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
MacroTileSizeBytes = MacroTileWidth * BytePerPixel * MacroTileHeight;
if (GPUVMEnable == true && GPUVMMaxPageTableLevels > 1) {
if (ViewportStationary && (NumberOfDPPs == 1 || !IsVertical(SourceRotation))) {
vp_height_dpte_ub = dml_floor(ViewportYStart + ViewportHeight +
MacroTileHeight - 1, MacroTileHeight) -
dml_floor(ViewportYStart, MacroTileHeight);
} else if (!IsVertical(SourceRotation)) {
vp_height_dpte_ub = dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight;
} else {
vp_height_dpte_ub = dml_ceil(SwathWidth - 1, MacroTileHeight) + MacroTileHeight;
}
*DPDE0BytesFrame = 64 * (dml_ceil((Pitch * vp_height_dpte_ub * BytePerPixel - MacroTileSizeBytes) /
(8 * 2097152), 1) + 1);
ExtraDPDEBytesFrame = 128 * (GPUVMMaxPageTableLevels - 2);
} else {
*DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
vp_height_dpte_ub = 0;
}
PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame + ExtraDPDEBytesFrame;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DCCEnable = %d\n", __func__, DCCEnable);
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: SwModeLinear = %d\n", __func__, SurfaceTiling == dm_sw_linear);
dml_print("DML::%s: BytePerPixel = %d\n", __func__, BytePerPixel);
dml_print("DML::%s: GPUVMMaxPageTableLevels = %d\n", __func__, GPUVMMaxPageTableLevels);
dml_print("DML::%s: BlockHeight256Bytes = %d\n", __func__, BlockHeight256Bytes);
dml_print("DML::%s: BlockWidth256Bytes = %d\n", __func__, BlockWidth256Bytes);
dml_print("DML::%s: MacroTileHeight = %d\n", __func__, MacroTileHeight);
dml_print("DML::%s: MacroTileWidth = %d\n", __func__, MacroTileWidth);
dml_print("DML::%s: MetaPTEBytesFrame = %d\n", __func__, *MetaPTEBytesFrame);
dml_print("DML::%s: MPDEBytesFrame = %d\n", __func__, MPDEBytesFrame);
dml_print("DML::%s: DPDE0BytesFrame = %d\n", __func__, *DPDE0BytesFrame);
dml_print("DML::%s: ExtraDPDEBytesFrame= %d\n", __func__, ExtraDPDEBytesFrame);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: ViewportHeight = %d\n", __func__, ViewportHeight);
dml_print("DML::%s: SwathWidth = %d\n", __func__, SwathWidth);
dml_print("DML::%s: vp_height_dpte_ub = %d\n", __func__, vp_height_dpte_ub);
#endif
if (HostVMEnable == true)
PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * HostVMDynamicLevels);
if (SurfaceTiling == dm_sw_linear) {
*PixelPTEReqHeight = 1;
*PixelPTEReqWidth = GPUVMMinPageSizeKBytes * 1024 * 8 / BytePerPixel;
PixelPTEReqWidth_linear = GPUVMMinPageSizeKBytes * 1024 * 8 / BytePerPixel;
*PTERequestSize = 64;
} else if (GPUVMMinPageSizeKBytes == 4) {
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
*PixelPTEReqWidth = 16 * BlockWidth256Bytes;
*PTERequestSize = 128;
} else {
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * 1024 * GPUVMMinPageSizeKBytes / (MacroTileHeight * BytePerPixel);
*PTERequestSize = 64;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMMinPageSizeKBytes = %d\n", __func__, GPUVMMinPageSizeKBytes);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d (after HostVM factor)\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: PixelPTEReqHeight = %d\n", __func__, *PixelPTEReqHeight);
dml_print("DML::%s: PixelPTEReqWidth = %d\n", __func__, *PixelPTEReqWidth);
dml_print("DML::%s: PixelPTEReqWidth_linear = %d\n", __func__, PixelPTEReqWidth_linear);
dml_print("DML::%s: PTERequestSize = %d\n", __func__, *PTERequestSize);
dml_print("DML::%s: Pitch = %d\n", __func__, Pitch);
#endif
*dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*dpte_row_width_ub_one_row_per_frame = (dml_ceil(((double)Pitch * (double)*dpte_row_height_one_row_per_frame /
(double) *PixelPTEReqHeight - 1) / (double) *PixelPTEReqWidth, 1) + 1) *
(double) *PixelPTEReqWidth;
*PixelPTEBytesPerRow_one_row_per_frame = *dpte_row_width_ub_one_row_per_frame / *PixelPTEReqWidth *
*PTERequestSize;
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests *
*PixelPTEReqWidth / Pitch), 1));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: dpte_row_height = %d (1)\n", __func__,
PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch);
dml_print("DML::%s: dpte_row_height = %f (2)\n", __func__,
dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch));
dml_print("DML::%s: dpte_row_height = %f (3)\n", __func__,
dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
dml_print("DML::%s: dpte_row_height = %d (4)\n", __func__,
1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests *
*PixelPTEReqWidth / Pitch), 1));
dml_print("DML::%s: dpte_row_height = %d\n", __func__, *dpte_row_height);
#endif
*dpte_row_width_ub = dml_ceil(((double) Pitch * (double) *dpte_row_height - 1),
(double) *PixelPTEReqWidth) + *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / (double)*PixelPTEReqWidth * (double)*PTERequestSize;
// VBA_DELTA, VBA doesn't have programming value for pte row height linear.
*dpte_row_height_linear = 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests *
PixelPTEReqWidth_linear / Pitch), 1);
if (*dpte_row_height_linear > 128)
*dpte_row_height_linear = 128;
} else if (!IsVertical(SourceRotation)) {
*dpte_row_height = *PixelPTEReqHeight;
if (GPUVMMinPageSizeKBytes > 64) {
*dpte_row_width_ub = (dml_ceil((Pitch * *dpte_row_height / *PixelPTEReqHeight - 1) /
*PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
} else if (ViewportStationary && (NumberOfDPPs == 1)) {
*dpte_row_width_ub = dml_floor(ViewportXStart + SwathWidth +
*PixelPTEReqWidth - 1, *PixelPTEReqWidth) -
dml_floor(ViewportXStart, *PixelPTEReqWidth);
} else {
*dpte_row_width_ub = (dml_ceil((SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) *
*PixelPTEReqWidth;
}
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else {
*dpte_row_height = dml_min(*PixelPTEReqWidth, MacroTileWidth);
if (ViewportStationary && (NumberOfDPPs == 1)) {
*dpte_row_width_ub = dml_floor(ViewportYStart + ViewportHeight + *PixelPTEReqHeight - 1,
*PixelPTEReqHeight) - dml_floor(ViewportYStart, *PixelPTEReqHeight);
} else {
*dpte_row_width_ub = (dml_ceil((SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1)
* *PixelPTEReqHeight;
}
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
}
if (GPUVMEnable != true)
*PixelPTEBytesPerRow = 0;
if (HostVMEnable == true)
*PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * HostVMDynamicLevels);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMMinPageSizeKBytes = %d\n", __func__, GPUVMMinPageSizeKBytes);
dml_print("DML::%s: dpte_row_height = %d\n", __func__, *dpte_row_height);
dml_print("DML::%s: dpte_row_height_linear = %d\n", __func__, *dpte_row_height_linear);
dml_print("DML::%s: dpte_row_width_ub = %d\n", __func__, *dpte_row_width_ub);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, *PixelPTEBytesPerRow);
dml_print("DML::%s: PTEBufferSizeInRequests = %d\n", __func__, PTEBufferSizeInRequests);
dml_print("DML::%s: dpte_row_height_one_row_per_frame = %d\n", __func__, *dpte_row_height_one_row_per_frame);
dml_print("DML::%s: dpte_row_width_ub_one_row_per_frame = %d\n",
__func__, *dpte_row_width_ub_one_row_per_frame);
dml_print("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %d\n",
__func__, *PixelPTEBytesPerRow_one_row_per_frame);
dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %i\n",
*MetaPTEBytesFrame);
#endif
return PDEAndMetaPTEBytesFrame;
} // CalculateVMAndRowBytes
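/*
 * Number of source lines that must be prefetched before active: the initial
 * fill from the scaler vertical init, rounded up to whole swaths plus the
 * largest possible partial swath, accounting for rotation and viewport start
 * when the viewport is stationary.
 */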
double dml32_CalculatePrefetchSourceLines(
double VRatio,
unsigned int VTaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
enum dm_rotation_angle SourceRotation,
bool ViewportStationary,
double SwathWidth,
unsigned int ViewportHeight,
unsigned int ViewportXStart,
unsigned int ViewportYStart,
/* Output */
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
unsigned int vp_start_rot;
unsigned int sw0_tmp;
unsigned int MaxPartialSwath;
double numLines;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatio = %f\n", __func__, VRatio);
dml_print("DML::%s: VTaps = %d\n", __func__, VTaps);
dml_print("DML::%s: ViewportXStart = %d\n", __func__, ViewportXStart);
dml_print("DML::%s: ViewportYStart = %d\n", __func__, ViewportYStart);
dml_print("DML::%s: ViewportStationary = %d\n", __func__, ViewportStationary);
dml_print("DML::%s: SwathHeight = %d\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + (double) VTaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + (double) VTaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (ViewportStationary) {
if (SourceRotation == dm_rotation_180 || SourceRotation == dm_rotation_180m) {
vp_start_rot = SwathHeight -
(((unsigned int) (ViewportYStart + ViewportHeight - 1) % SwathHeight) + 1);
} else if (SourceRotation == dm_rotation_270 || SourceRotation == dm_rotation_90m) {
vp_start_rot = ViewportXStart;
} else if (SourceRotation == dm_rotation_90 || SourceRotation == dm_rotation_270m) {
vp_start_rot = SwathHeight -
(((unsigned int)(ViewportYStart + SwathWidth - 1) % SwathHeight) + 1);
} else {
vp_start_rot = ViewportYStart;
}
sw0_tmp = SwathHeight - (vp_start_rot % SwathHeight);
if (sw0_tmp < *VInitPreFill)
*MaxNumSwath = dml_ceil((*VInitPreFill - sw0_tmp) / SwathHeight, 1) + 1;
else
*MaxNumSwath = 1;
MaxPartialSwath = dml_max(1, (unsigned int) (vp_start_rot + *VInitPreFill - 1) % SwathHeight);
} else {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1;
if (*VInitPreFill > 1)
MaxPartialSwath = dml_max(1, (unsigned int) (*VInitPreFill - 2) % SwathHeight);
else
MaxPartialSwath = dml_max(1, (unsigned int) (*VInitPreFill + SwathHeight - 2) % SwathHeight);
}
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: vp_start_rot = %d\n", __func__, vp_start_rot);
dml_print("DML::%s: VInitPreFill = %d\n", __func__, *VInitPreFill);
dml_print("DML::%s: MaxPartialSwath = %d\n", __func__, MaxPartialSwath);
dml_print("DML::%s: MaxNumSwath = %d\n", __func__, *MaxNumSwath);
dml_print("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return numLines;
} // CalculatePrefetchSourceLines
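/*
 * Decide which surfaces end up in MALL for static screen: surfaces explicitly
 * set to "enable" are taken first, then non-disabled (optimize) surfaces are
 * added greedily, smallest first, while they still fit in the MALL allocation
 * and their one-row-per-frame PTEs fit in the PTE buffer.
 */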
void dml32_CalculateMALLUseForStaticScreen(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCNFinal,
enum dm_use_mall_for_static_screen_mode *UseMALLForStaticScreen,
unsigned int SurfaceSizeInMALL[],
bool one_row_per_frame_fits_in_buffer[],
/* output */
bool UsesMALLForStaticScreen[])
{
unsigned int k;
unsigned int SurfaceToAddToMALL;
bool CanAddAnotherSurfaceToMALL;
unsigned int TotalSurfaceSizeInMALL;
TotalSurfaceSizeInMALL = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
UsesMALLForStaticScreen[k] = (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable);
if (UsesMALLForStaticScreen[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, UsesMALLForStaticScreen = %d\n", __func__, k, UsesMALLForStaticScreen[k]);
dml_print("DML::%s: k=%d, TotalSurfaceSizeInMALL = %d\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
SurfaceToAddToMALL = 0;
CanAddAnotherSurfaceToMALL = true;
while (CanAddAnotherSurfaceToMALL) {
CanAddAnotherSurfaceToMALL = false;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k] <= MALLAllocatedForDCNFinal * 1024 * 1024 &&
!UsesMALLForStaticScreen[k] &&
UseMALLForStaticScreen[k] != dm_use_mall_static_screen_disable &&
one_row_per_frame_fits_in_buffer[k] &&
(!CanAddAnotherSurfaceToMALL ||
SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, UseMALLForStaticScreen = %d (dis, en, optimize)\n",
__func__, k, UseMALLForStaticScreen[k]);
#endif
}
}
if (CanAddAnotherSurfaceToMALL) {
UsesMALLForStaticScreen[SurfaceToAddToMALL] = true;
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SurfaceToAddToMALL = %d\n", __func__, SurfaceToAddToMALL);
dml_print("DML::%s: TotalSurfaceSizeInMALL = %d\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
}
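/*
 * Average DCC meta row and DPTE row fetch bandwidth: one row's worth of bytes
 * spread over the row height in line times, scaled by VRatio, with the chroma
 * plane added for the 4:2:0 and rgbe_alpha formats.
 */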
void dml32_CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
/* Output */
double *meta_row_bw,
double *dpte_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 ||
SourcePixelFormat == dm_rgbe_alpha) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime) + VRatioChroma *
MetaRowByteChroma / (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 ||
SourcePixelFormat == dm_rgbe_alpha) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime) +
VRatioChroma * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
}
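/*
 * Urgent latency is the worst of the pixel-only, mixed and VM-only latencies,
 * optionally increased by a fabric-clock adjustment term scaled by how far
 * FabricClock is below its reference.
 */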
double dml32_CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClock)
{
double ret;
ret = dml_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
if (DoUrgentLatencyAdjustment == true) {
ret = ret + UrgentLatencyAdjustmentFabricClockComponent *
(UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
}
return ret;
}
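/*
 * Urgent burst factors for cursor, luma and chroma: the ratio of how long the
 * respective buffer (cursor buffer or DET) can hide traffic to that time
 * minus the urgent latency. If a buffer cannot cover the urgent latency at
 * all, NotEnoughUrgentLatencyHiding is flagged and the factor is forced to 0.
 */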
void dml32_CalculateUrgentBurstFactor(
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange,
unsigned int swath_width_luma_ub,
unsigned int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
unsigned int DETBufferSizeY,
unsigned int DETBufferSizeC,
/* Output */
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
double CursorBufferSizeInTime;
double DETBufferSizeInTimeLuma;
double DETBufferSizeInTimeChroma;
*NotEnoughUrgentLatencyHiding = 0;
if (CursorWidth > 0) {
LinesInCursorBuffer = 1 << (unsigned int) dml_floor(dml_log2(CursorBufferSize * 1024.0 /
(CursorWidth * CursorBPP / 8.0)), 1.0);
if (VRatio > 0) {
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorCursor = 0;
} else {
*UrgentBurstFactorCursor = CursorBufferSizeInTime /
(CursorBufferSizeInTime - UrgentLatency);
}
} else {
*UrgentBurstFactorCursor = 1;
}
}
LinesInDETLuma = (UseMALLForPStateChange == dm_use_mall_pstate_change_phantom_pipe ? 1024 * 1024 :
DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorLuma = 0;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
} else {
*UrgentBurstFactorLuma = 1;
}
if (BytePerPixelInDETC > 0) {
LinesInDETChroma = (UseMALLForPStateChange == dm_use_mall_pstate_change_phantom_pipe ?
1024 * 1024 : DETBufferSizeC) / BytePerPixelInDETC
/ swath_width_chroma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / VRatio;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorChroma = 0;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma
/ (DETBufferSizeInTimeChroma - UrgentLatency);
}
} else {
*UrgentBurstFactorChroma = 1;
}
}
} // CalculateUrgentBurstFactor
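/*
 * Deep-sleep DCFCLK: per surface, enough to keep up with the swath delivery
 * rate (scaled by __DML_MIN_DCFCLK_FACTOR__) and at least PixelClock / 16;
 * the final value must also cover the total read bandwidth over the return
 * bus and never drops below 8.0.
 */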
void dml32_CalculateDCFCLKDeepSleep(
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
unsigned int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerSurface[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double Dppclk[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
unsigned int ReturnBusWidth,
/* Output */
double *DCFClkDeepSleep)
{
unsigned int k;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
double DCFClkDeepSleepPerSurface[DC__NUM_DPP__MAX];
double ReadBandwidth = 0.0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerSurface[k] / HRatio[k]
/ PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] *
DPPPerSurface[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k]
/ Dppclk[k];
}
}
if (BytePerPixelC[k] > 0) {
DCFClkDeepSleepPerSurface[k] = dml_max(__DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] *
BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma,
__DML_MIN_DCFCLK_FACTOR__ * SwathWidthC[k] * BytePerPixelC[k] /
32.0 / DisplayPipeLineDeliveryTimeChroma);
} else {
DCFClkDeepSleepPerSurface[k] = __DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] /
64.0 / DisplayPipeLineDeliveryTimeLuma;
}
DCFClkDeepSleepPerSurface[k] = dml_max(DCFClkDeepSleepPerSurface[k], PixelClock[k] / 16);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, PixelClock = %f\n", __func__, k, PixelClock[k]);
dml_print("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
for (k = 0; k < NumberOfActiveSurfaces; ++k)
ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
*DCFClkDeepSleep = dml_max(8.0, __DML_MIN_DCFCLK_FACTOR__ * ReadBandwidth / (double) ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: __DML_MIN_DCFCLK_FACTOR__ = %f\n", __func__, __DML_MIN_DCFCLK_FACTOR__);
dml_print("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
dml_print("DML::%s: ReturnBusWidth = %d\n", __func__, ReturnBusWidth);
dml_print("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k)
*DCFClkDeepSleep = dml_max(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
#endif
} // CalculateDCFCLKDeepSleep
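/*
 * Writeback completion delay in pixels: approximately the output lines still
 * to be produced after the last source line is consumed (clamped at zero),
 * each costing one writeback line length, plus the horizontal blanking and a
 * fixed 80-pixel margin.
 */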
double dml32_CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
unsigned int WritebackDestinationWidth,
unsigned int WritebackDestinationHeight,
unsigned int WritebackSourceHeight,
unsigned int HTotal)
{
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
double WritebackVInit;
WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
Line_length = dml_max((double) WritebackDestinationWidth,
dml_ceil((double)WritebackDestinationWidth / 6.0, 1.0) * WritebackVTaps);
Output_lines_last_notclamped = WritebackDestinationHeight - 1 -
dml_ceil(((double)WritebackSourceHeight -
(double) WritebackVInit) / (double)WritebackVRatio, 1.0);
if (Output_lines_last_notclamped < 0) {
CalculateWriteBackDelay = 0;
} else {
CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length +
(HTotal - WritebackDestinationWidth) + 80;
}
return CalculateWriteBackDelay;
}
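/*
 * For each voltage state and each of the two internal configurations, lower
 * DCFCLK to the minimum that still meets the average bandwidth, the
 * per-surface prefetch-time peak bandwidth, and the dynamic-metadata
 * extra-latency budget, then add a 5% margin. The result is clamped to the
 * state's nominal DCFCLK.
 */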
void dml32_UseMinimumDCFCLK(
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
bool DRRDisplay[],
bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
unsigned int MaxInterDCNTileRepeaters,
unsigned int MaxPrefetchMode,
double DRAMClockChangeLatencyFinal,
double FCLKChangeLatency,
double SREnterPlusExitTime,
unsigned int ReturnBusWidth,
unsigned int RoundTripPingLatencyCycles,
unsigned int ReorderingBytes,
unsigned int PixelChunkSizeInKByte,
unsigned int MetaChunkSize,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int NumberOfActiveSurfaces,
double HostVMMinPageSize,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool DynamicMetadataVMEnabled,
bool ImmediateFlipRequirement,
bool ProgressiveToInterlaceUnitInOPP,
double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
double PercentOfIdealSDPPortBWReceivedAfterUrgLatency,
unsigned int VTotal[],
unsigned int VActive[],
unsigned int DynamicMetadataTransmittedBytes[],
unsigned int DynamicMetadataLinesBeforeActiveRequired[],
bool Interlace[],
double RequiredDPPCLKPerSurface[][2][DC__NUM_DPP__MAX],
double RequiredDISPCLK[][2],
double UrgLatency[],
unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
double ProjectedDCFClkDeepSleep[][2],
double MaximumVStartup[][2][DC__NUM_DPP__MAX],
unsigned int TotalNumberOfActiveDPP[][2],
unsigned int TotalNumberOfDCCActiveDPP[][2],
unsigned int dpte_group_bytes[],
double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
unsigned int BytePerPixelY[],
unsigned int BytePerPixelC[],
unsigned int HTotal[],
double PixelClock[],
double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
double MetaRowBytes[][2][DC__NUM_DPP__MAX],
bool DynamicMetadataEnable[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double DCFCLKPerState[],
/* Output */
double DCFCLKState[][2])
{
unsigned int i, j, k;
unsigned int dummy1;
double dummy2, dummy3;
double NormalEfficiency;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2];
NormalEfficiency = PercentOfIdealSDPPortBWReceivedAfterUrgLatency / 100.0;
for (i = 0; i < DC__VOLTAGE_STATES; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX];
double PrefetchPixelLinesTime[DC__NUM_DPP__MAX];
double DCFCLKRequiredForPeakBandwidthPerSurface[DC__NUM_DPP__MAX];
double DynamicMetadataVMExtraLatency[DC__NUM_DPP__MAX];
double MinimumTWait = 0.0;
double DPTEBandwidth;
double DCFCLKRequiredForAverageBandwidth;
unsigned int ExtraLatencyBytes;
double ExtraLatencyCycles;
double DCFCLKRequiredForPeakBandwidth;
unsigned int NoOfDPPState[DC__NUM_DPP__MAX];
double MinimumTvmPlus2Tr0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
+ NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k]
/ (15.75 * HTotal[k] / PixelClock[k]);
}
for (k = 0; k <= NumberOfActiveSurfaces - 1; ++k)
NoOfDPPState[k] = NoOfDPP[i][j][k];
DPTEBandwidth = TotalMaxPrefetchFlipDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max(ProjectedDCFClkDeepSleep[i][j], DPTEBandwidth / NormalEfficiency / ReturnBusWidth);
ExtraLatencyBytes = dml32_CalculateExtraLatencyBytes(ReorderingBytes,
TotalNumberOfActiveDPP[i][j], PixelChunkSizeInKByte,
TotalNumberOfDCCActiveDPP[i][j], MetaChunkSize, GPUVMEnable, HostVMEnable,
NumberOfActiveSurfaces, NoOfDPPState, dpte_group_bytes, 1, HostVMMinPageSize,
HostVMMaxNonCachedPageTableLevels);
ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__
+ ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
double DCFCLKCyclesRequiredInPrefetch;
double PrefetchTime;
PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k]
* swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
+ PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k]
* BytePerPixelC[k]) / NormalEfficiency
/ ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k]
+ PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency
/ NormalEfficiency / ReturnBusWidth
* (GPUVMMaxPageTableLevels > 2 ? 1 : 0)
+ 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency
/ ReturnBusWidth
+ 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth
+ PixelDCFCLKCyclesRequiredInPrefetch[k];
PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k])
* HTotal[k] / PixelClock[k];
DynamicMetadataVMExtraLatency[k] = (GPUVMEnable == true &&
DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
UrgLatency[i] * GPUVMMaxPageTableLevels *
(HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
MinimumTWait = dml32_CalculateTWait(MaxPrefetchMode,
UseMALLForPStateChange[k],
SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
DRRDisplay[k],
DRAMClockChangeLatencyFinal,
FCLKChangeLatency,
UrgLatency[i],
SREnterPlusExitTime);
PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] -
MinimumTWait - UrgLatency[i] *
((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels :
GPUVMMaxPageTableLevels - 2) * (HostVMEnable == true ?
HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) -
DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch;
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime *
PixelDCFCLKCyclesRequiredInPrefetch[k] /
DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerSurface[k] = NoOfDPPState[k] *
PixelDCFCLKCyclesRequiredInPrefetch[k] /
PrefetchPixelLinesTime[k] *
dml_max(1.0, ExpectedVRatioPrefetch) *
dml_max(1.0, ExpectedVRatioPrefetch / 4);
if (HostVMEnable == true || ImmediateFlipRequirement == true) {
DCFCLKRequiredForPeakBandwidthPerSurface[k] =
DCFCLKRequiredForPeakBandwidthPerSurface[k] +
NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency /
NormalEfficiency / ReturnBusWidth;
}
} else {
DCFCLKRequiredForPeakBandwidthPerSurface[k] = DCFCLKPerState[i];
}
if (DynamicMetadataEnable[k] == true) {
double TSetupPipe;
double TdmbfPipe;
double TdmsksPipe;
double TdmecPipe;
double AllowedTimeForUrgentExtraLatency;
dml32_CalculateVUpdateAndDynamicMetadataParameters(
MaxInterDCNTileRepeaters,
RequiredDPPCLKPerSurface[i][j][k],
RequiredDISPCLK[i][j],
ProjectedDCFClkDeepSleep[i][j],
PixelClock[k],
HTotal[k],
VTotal[k] - VActive[k],
DynamicMetadataTransmittedBytes[k],
DynamicMetadataLinesBeforeActiveRequired[k],
Interlace[k],
ProgressiveToInterlaceUnitInOPP,
/* output */
&TSetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe,
&dummy1,
&dummy2,
&dummy3);
AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] /
PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe -
TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0)
DCFCLKRequiredForPeakBandwidthPerSurface[k] =
dml_max(DCFCLKRequiredForPeakBandwidthPerSurface[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
else
DCFCLKRequiredForPeakBandwidthPerSurface[k] = DCFCLKPerState[i];
}
}
DCFCLKRequiredForPeakBandwidth = 0;
for (k = 0; k <= NumberOfActiveSurfaces - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth +
DCFCLKRequiredForPeakBandwidthPerSurface[k];
}
MinimumTvmPlus2Tr0 = UrgLatency[i] * (GPUVMEnable == true ?
(HostVMEnable == true ? (GPUVMMaxPageTableLevels + 2) *
(HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : 0);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
double MaximumTvmPlus2Tr0PlusTsw;
MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] /
PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth,
2 * ExtraLatencyCycles / (MaximumTvmPlus2Tr0PlusTsw -
MinimumTvmPlus2Tr0 -
PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles +
PixelDCFCLKCyclesRequiredInPrefetch[k]) /
(MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 *
dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
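/*
 * Extra bytes outstanding at urgent latency: the reordering buffer plus one
 * pixel chunk per active DPP and one meta chunk per DCC-active DPP, plus,
 * with GPUVM, one dpte group per DPP scaled by the HostVM page-table walk
 * overhead and the inefficiency factor.
 */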
unsigned int dml32_CalculateExtraLatencyBytes(unsigned int ReorderingBytes,
unsigned int TotalNumberOfActiveDPP,
unsigned int PixelChunkSizeInKByte,
unsigned int TotalNumberOfDCCActiveDPP,
unsigned int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int NumberOfActiveSurfaces,
unsigned int NumberOfDPP[],
unsigned int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
unsigned int HostVMMaxNonCachedPageTableLevels)
{
unsigned int k;
double ret;
unsigned int HostVMDynamicLevels;
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048)
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576)
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
} else {
HostVMDynamicLevels = 0;
}
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte +
TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] *
(1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
}
return ret;
}
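// Function: CalculateVUpdateAndDynamicMetadataParameters
// Computes the VUPDATE offset/width and VREADY offset in pixels, the resulting TSetup,
// and the dynamic-metadata timing budget (Tdmbf, Tdmec, Tdmsks).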
void dml32_CalculateVUpdateAndDynamicMetadataParameters(
unsigned int MaxInterDCNTileRepeaters,
double Dppclk,
double Dispclk,
double DCFClkDeepSleep,
double PixelClock,
unsigned int HTotal,
unsigned int VBlank,
unsigned int DynamicMetadataTransmittedBytes,
unsigned int DynamicMetadataLinesBeforeActiveRequired,
unsigned int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
/* output */
double *TSetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
double TotalRepeaterDelayTime;
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / Dppclk + 3 / Dispclk);
*VUpdateWidthPix =
dml_ceil((14.0 / DCFClkDeepSleep + 12.0 / Dppclk + TotalRepeaterDelayTime) * PixelClock, 1.0);
*VReadyOffsetPix = dml_ceil(dml_max(150.0 / Dppclk,
TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / Dppclk) * PixelClock, 1.0);
*VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1.0);
*TSetup = (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
*Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / Dispclk;
*Tdmec = HTotal / PixelClock;
if (DynamicMetadataLinesBeforeActiveRequired == 0)
*Tdmsks = VBlank * HTotal / PixelClock / 2.0;
else
*Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false)
*Tdmsks = *Tdmsks / 2;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VUpdateWidthPix = %d\n", __func__, *VUpdateWidthPix);
dml_print("DML::%s: VReadyOffsetPix = %d\n", __func__, *VReadyOffsetPix);
dml_print("DML::%s: VUpdateOffsetPix = %d\n", __func__, *VUpdateOffsetPix);
dml_print("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %d\n",
__func__, DynamicMetadataLinesBeforeActiveRequired);
dml_print("DML::%s: VBlank = %d\n", __func__, VBlank);
dml_print("DML::%s: HTotal = %d\n", __func__, HTotal);
dml_print("DML::%s: PixelClock = %f\n", __func__, PixelClock);
dml_print("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
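// Function: CalculateTWait
// Worst-case latency the prefetch must absorb before return data arrives, selected from
// DRAM clock change, FCLK change, self-refresh and urgent latency according to the
// prefetch mode and the MALL p-state strategy.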
double dml32_CalculateTWait(
unsigned int PrefetchMode,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange,
bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
bool DRRDisplay,
double DRAMClockChangeLatency,
double FCLKChangeLatency,
double UrgentLatency,
double SREnterPlusExitTime)
{
double TWait = 0.0;
if (PrefetchMode == 0 &&
!(UseMALLForPStateChange == dm_use_mall_pstate_change_full_frame) &&
!(UseMALLForPStateChange == dm_use_mall_pstate_change_sub_viewport) &&
!(UseMALLForPStateChange == dm_use_mall_pstate_change_phantom_pipe) &&
!(SynchronizeDRRDisplaysForUCLKPStateChangeFinal && DRRDisplay)) {
TWait = dml_max3(DRAMClockChangeLatency + UrgentLatency, SREnterPlusExitTime, UrgentLatency);
} else if (PrefetchMode <= 1 && !(UseMALLForPStateChange == dm_use_mall_pstate_change_phantom_pipe)) {
TWait = dml_max3(FCLKChangeLatency + UrgentLatency, SREnterPlusExitTime, UrgentLatency);
} else if (PrefetchMode <= 2 && !(UseMALLForPStateChange == dm_use_mall_pstate_change_phantom_pipe)) {
TWait = dml_max(SREnterPlusExitTime, UrgentLatency);
} else {
TWait = UrgentLatency;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PrefetchMode = %d\n", __func__, PrefetchMode);
dml_print("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
} // CalculateTWait
// Function: get_return_bw_mbps
// Returns the effective return bandwidth in megabytes per second.
double dml32_get_return_bw_mbps(const soc_bounding_box_st *soc,
const int VoltageLevel,
const bool HostVMEnable,
const double DCFCLK,
const double FabricClock,
const double DRAMSpeed)
{
double ReturnBW = 0.;
double IdealSDPPortBandwidth = soc->return_bus_width_bytes /*mode_lib->vba.ReturnBusWidth*/ * DCFCLK;
double IdealFabricBandwidth = FabricClock * soc->fabric_datapath_to_dcn_data_return_bytes;
double IdealDRAMBandwidth = DRAMSpeed * soc->num_chans * soc->dram_channel_width_bytes;
double PixelDataOnlyReturnBW = dml_min3(IdealSDPPortBandwidth * soc->pct_ideal_sdp_bw_after_urgent / 100,
IdealFabricBandwidth * soc->pct_ideal_fabric_bw_after_urgent / 100,
IdealDRAMBandwidth * (VoltageLevel < 2 ? soc->pct_ideal_dram_bw_after_urgent_strobe :
soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_only) / 100);
double PixelMixedWithVMDataReturnBW = dml_min3(IdealSDPPortBandwidth * soc->pct_ideal_sdp_bw_after_urgent / 100,
IdealFabricBandwidth * soc->pct_ideal_fabric_bw_after_urgent / 100,
IdealDRAMBandwidth * (VoltageLevel < 2 ? soc->pct_ideal_dram_bw_after_urgent_strobe :
soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_only) / 100);
if (HostVMEnable != true)
ReturnBW = PixelDataOnlyReturnBW;
else
ReturnBW = PixelMixedWithVMDataReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VoltageLevel = %d\n", __func__, VoltageLevel);
dml_print("DML::%s: HostVMEnable = %d\n", __func__, HostVMEnable);
dml_print("DML::%s: DCFCLK = %f\n", __func__, DCFCLK);
dml_print("DML::%s: FabricClock = %f\n", __func__, FabricClock);
dml_print("DML::%s: DRAMSpeed = %f\n", __func__, DRAMSpeed);
dml_print("DML::%s: IdealSDPPortBandwidth = %f\n", __func__, IdealSDPPortBandwidth);
dml_print("DML::%s: IdealFabricBandwidth = %f\n", __func__, IdealFabricBandwidth);
dml_print("DML::%s: IdealDRAMBandwidth = %f\n", __func__, IdealDRAMBandwidth);
dml_print("DML::%s: PixelDataOnlyReturnBW = %f\n", __func__, PixelDataOnlyReturnBW);
dml_print("DML::%s: PixelMixedWithVMDataReturnBW = %f\n", __func__, PixelMixedWithVMDataReturnBW);
dml_print("DML::%s: ReturnBW = %f MBps\n", __func__, ReturnBW);
#endif
return ReturnBW;
}
// Function: get_return_bw_mbps_vm_only
// Returns the VM-data-only return bandwidth in megabytes per second.
double dml32_get_return_bw_mbps_vm_only(const soc_bounding_box_st *soc,
const int VoltageLevel,
const double DCFCLK,
const double FabricClock,
const double DRAMSpeed)
{
double VMDataOnlyReturnBW = dml_min3(
soc->return_bus_width_bytes * DCFCLK * soc->pct_ideal_sdp_bw_after_urgent / 100.0,
FabricClock * soc->fabric_datapath_to_dcn_data_return_bytes
* soc->pct_ideal_sdp_bw_after_urgent / 100.0,
DRAMSpeed * soc->num_chans * soc->dram_channel_width_bytes
* (VoltageLevel < 2 ?
soc->pct_ideal_dram_bw_after_urgent_strobe :
soc->pct_ideal_dram_sdp_bw_after_urgent_vm_only) / 100.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VoltageLevel = %d\n", __func__, VoltageLevel);
dml_print("DML::%s: DCFCLK = %f\n", __func__, DCFCLK);
dml_print("DML::%s: FabricClock = %f\n", __func__, FabricClock);
dml_print("DML::%s: DRAMSpeed = %f\n", __func__, DRAMSpeed);
dml_print("DML::%s: VMDataOnlyReturnBW = %f\n", __func__, VMDataOnlyReturnBW);
#endif
return VMDataOnlyReturnBW;
}
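// Function: CalculateExtraLatency
// Round-trip ping cycles plus the arbiter-to-return delay converted to time at DCFCLK,
// plus the extra latency bytes divided by the return bandwidth.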
double dml32_CalculateExtraLatency(
unsigned int RoundTripPingLatencyCycles,
unsigned int ReorderingBytes,
double DCFCLK,
unsigned int TotalNumberOfActiveDPP,
unsigned int PixelChunkSizeInKByte,
unsigned int TotalNumberOfDCCActiveDPP,
unsigned int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int NumberOfActiveSurfaces,
unsigned int NumberOfDPP[],
unsigned int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
unsigned int HostVMMaxNonCachedPageTableLevels)
{
double ExtraLatencyBytes;
double ExtraLatency;
ExtraLatencyBytes = dml32_CalculateExtraLatencyBytes(
ReorderingBytes,
TotalNumberOfActiveDPP,
PixelChunkSizeInKByte,
TotalNumberOfDCCActiveDPP,
MetaChunkSize,
GPUVMEnable,
HostVMEnable,
NumberOfActiveSurfaces,
NumberOfDPP,
dpte_group_bytes,
HostVMInefficiencyFactor,
HostVMMinPageSize,
HostVMMaxNonCachedPageTableLevels);
ExtraLatency = (RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__) / DCFCLK + ExtraLatencyBytes / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: RoundTripPingLatencyCycles=%d\n", __func__, RoundTripPingLatencyCycles);
dml_print("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml_print("DML::%s: ExtraLatencyBytes=%f\n", __func__, ExtraLatencyBytes);
dml_print("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml_print("DML::%s: ExtraLatency=%f\n", __func__, ExtraLatency);
#endif
return ExtraLatency;
} // CalculateExtraLatency
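// Function: CalculatePrefetchSchedule
// Builds the prefetch schedule for surface k: destination lines for the VM and row
// fetches, the prefetch bandwidth, the luma/chroma VRatio during prefetch, the required
// prefetch pixel bandwidth and the dynamic-metadata feasibility. Returns true when the
// schedule cannot be met.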
bool dml32_CalculatePrefetchSchedule(
struct vba_vars_st *v,
unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
unsigned int DPP_RECOUT_WIDTH,
unsigned int VStartup,
unsigned int MaxVStartup,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
unsigned int VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
unsigned int VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int swath_width_luma_ub,
unsigned int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
double TPreReq,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
double *TSetup,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
double LinesForPrefetchBandwidth = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
double Tr0_trips;
double Tvm_trips_rounded;
double Tr0_trips_rounded;
double Lsw_oto;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
double prefetch_sw_bytes;
double bytes_pp;
double dep_bytes;
unsigned int max_vratio_pre = v->MaxVRatioPre;
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
if (v->GPUVMEnable == true && v->HostVMEnable == true)
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
__func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
/* output */
&Tdmbf,
&Tdmec,
&Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (v->DynamicMetadataVMEnabled == true)
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
__func__, Tdmbf);
dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
__func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
*Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
+ (myPipe->DPPPerSurface - 1) * DPP_RECOUT_WIDTH
+ ((myPipe->ODMMode == dm_odm_split_mode_1to2 || myPipe->ODMMode == dm_odm_mode_mso_1to2) ?
myPipe->HActive / 2 : 0)
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
dml_print("DML::%s: DPP_RECOUT_WIDTH: %d\n", __func__, DPP_RECOUT_WIDTH);
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
#endif
MyError = false;
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
if (v->GPUVMEnable == true) {
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
if (v->GPUVMMaxPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem *
(double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
} else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
Tvm_trips_rounded = LineTime / 4.0;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
Tvm_trips_rounded = LineTime / 4.0;
Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
min_Lsw = dml_max(min_Lsw, 1.0);
Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(
Tvm_trips,
*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4(
Tr0_trips,
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
(LineTime - Tvm_oto)/2.0,
LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
Tr0_oto = (LineTime - Tvm_oto) / 2.0;
Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
__func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
if (prefetch_sw_bytes < dep_bytes)
prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
if (dst_y_prefetch_equ > 1 &&
(Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
&& Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
(Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
(Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
PrefetchBandwidth4 = prefetch_sw_bytes /
(Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
dml_print("DML::%s: PrefetchBandwidth4: %f\n", __func__, PrefetchBandwidth4);
#endif
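		// Pick the prefetch bandwidth whose implied Tvm/Tr0 are consistent with the rounded
		// trip-time floors: bw1 assumes both the VM and row fetches are bandwidth limited,
		// bw2 pins Tr0 at its floor, bw3 pins Tvm at its floor and bw4 pins both.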
{
bool Case1OK;
bool Case2OK;
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
}
} else {
Case1OK = false;
}
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
}
} else {
Case2OK = false;
}
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
}
} else {
Case3OK = false;
}
if (Case1OK)
prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
prefetch_bw_equ = PrefetchBandwidth3;
else
prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
if (prefetch_bw_equ > 0) {
if (v->GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor / prefetch_bw_equ,
Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
(LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
Tvm_equ = 0;
Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
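		// Choose between the one-to-one (oto) and equalized (equ) schedules: take whichever
		// needs fewer destination lines, but fall back to the equ line count when the oto
		// schedule cannot satisfy TPreReq.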
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
if (dst_y_prefetch_oto * LineTime < TPreReq) {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
}
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
/* Clamp to oto for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
/* Clamp to equ for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: SwathHeightY = %d\n", __func__, SwathHeightY);
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
(LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
*VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: MaxNumSwathY = %d\n", __func__, MaxNumSwathY);
#endif
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC);
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
(LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
*VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: MaxNumSwathC = %d\n", __func__, MaxNumSwathC);
#endif
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
/ LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
* swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
__func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
(double)LinesToRequestPrefetchPixelData * LineTime +
2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
(*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
__func__, dst_y_prefetch_equ);
#endif
}
{
double prefetch_vm_bw;
double prefetch_row_bw;
if (PDEAndMetaPTEBytesFrame == 0) {
prefetch_vm_bw = 0;
} else if (*DestinationLinesToRequestVMInVBlank > 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
(*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set. DestinationLinesToRequestVMInVBlank=%f (should be > 0)\n",
__func__, *DestinationLinesToRequestVMInVBlank);
#endif
}
if (MetaRowByte + PixelPTEBytesPerRow == 0) {
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
(*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set. DestinationLinesToRequestRowInVBlank=%f (should be > 0)\n",
__func__, *DestinationLinesToRequestRowInVBlank);
#endif
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
return MyError;
} // CalculatePrefetchSchedule
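// Function: CalculateFlipSchedule
// Computes the immediate-flip schedule for one surface: the destination lines needed for
// the VM and row fetches, the final flip bandwidth, and whether the flip fits within the
// minimum row residency time.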
void dml32_CalculateFlipSchedule(
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
bool use_one_row_for_frame_flip,
/* Output */
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW = 1.0;
if (GPUVMEnable == true && HostVMEnable == true)
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
else
HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotImmediateFlipBytes = %d\n", __func__, TotImmediateFlipBytes);
dml_print("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
#endif
if (TotImmediateFlipBytes > 0) {
if (use_one_row_for_frame_flip) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + 2 * DPTEBytesPerRow) *
BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
} else {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) *
BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
}
if (GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(Tno_bw + PDEAndMetaPTEBytesPerFrame *
HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency *
(GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
if ((GPUVMEnable == true || DCCEnable == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1), LineTime / 4.0);
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip =
dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1.0) / 4.0;
*DestinationLinesToRequestRowInImmediateFlip =
dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1.0) / 4.0;
if (GPUVMEnable == true) {
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor /
(*DestinationLinesToRequestVMInImmediateFlip * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) /
(*DestinationLinesToRequestRowInImmediateFlip * LineTime));
} else if ((GPUVMEnable == true || DCCEnable == true)) {
*final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) /
(*DestinationLinesToRequestRowInImmediateFlip * LineTime);
} else {
*final_flip_bw = 0;
}
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
TimeForFetchingRowInVBlankImmediateFlip = 0;
*DestinationLinesToRequestVMInImmediateFlip = 0;
*DestinationLinesToRequestRowInImmediateFlip = 0;
*final_flip_bw = 0;
}
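	// min_row_time: shortest time a PTE/meta row remains resident at the active VRatio;
	// the flip's VM fetch plus two row fetches must complete within it.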
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(dpte_row_height *
LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(meta_row_height *
LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
} else {
min_row_time = dml_min4(dpte_row_height * LineTime / VRatio, meta_row_height *
LineTime / VRatio, dpte_row_height_chroma * LineTime /
VRatioChroma, meta_row_height_chroma * LineTime / VRatioChroma);
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatio;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatio;
} else {
min_row_time =
dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
}
}
if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip
> min_row_time) {
*ImmediateFlipSupportedForPipe = false;
} else {
*ImmediateFlipSupportedForPipe = true;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: DCCEnable = %d\n", __func__, DCCEnable);
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n",
__func__, *DestinationLinesToRequestVMInImmediateFlip);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n",
__func__, *DestinationLinesToRequestRowInImmediateFlip);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n",
__func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
} // CalculateFlipSchedule
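// Function: CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport
// Derives the urgent, p-state, FCLK-change, USR-retraining and stutter watermarks, the
// per-surface latency-hiding margins, and from those the DRAM clock change, FCLK change
// and USR retraining support levels plus the sub-viewport lines needed in MALL.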
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
struct vba_vars_st *v,
unsigned int PrefetchMode,
double DCFCLK,
double ReturnBW,
SOCParametersList mmSOCParameters,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
enum dm_fclock_change_support *FCLKChangeSupport,
double *MinActiveFCLKChangeLatencySupported,
bool *USRRetrainingSupport,
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
unsigned int DRAMClockChangeSupportNumber = 0;
unsigned int LastSurfaceWithoutMargin;
unsigned int DRAMClockChangeMethod = 0;
bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
double MinActiveFCLKChangeMargin = 0.;
double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
double ActiveClockChangeLatencyHidingY;
double ActiveClockChangeLatencyHidingC;
double ActiveClockChangeLatencyHiding;
double EffectiveDETBufferSizeY;
double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
double TotalPixelBW = 0.0;
bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETC[DC__NUM_DPP__MAX];
unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
double FullDETBufferingTimeY;
double FullDETBufferingTimeC;
double WritebackDRAMClockChangeLatencyMargin;
double WritebackFCLKChangeLatencyMargin;
double WritebackLatencyHiding;
bool SameTimingForFCLKChange;
unsigned int TotalActiveWriteback = 0;
unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
__func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
TotalActiveWriteback = 0;
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (v->WritebackEnable[k] == true)
TotalActiveWriteback = TotalActiveWriteback + 1;
}
if (TotalActiveWriteback <= 1) {
v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
	if (v->USRRetrainingRequiredFinal)
		v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
				+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
if (v->USRRetrainingRequiredFinal)
v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (v->USRRetrainingRequiredFinal)
v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
__func__, v->Watermark.WritebackDRAMClockChangeWatermark);
dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
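	// TotalPixelBW: aggregate DET fill rate across all active surfaces, used to apportion
	// the shared compressed buffer when unbounded requests are enabled.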
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
* (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
/ (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
/ v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
/ v->VRatioChroma[k];
ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
/ v->PixelClock[k];
if (v->NumberOfActiveSurfaces > 1) {
				ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
						- (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
						/ v->PixelClock[k] / v->VRatioChroma[k];
}
ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
ActiveClockChangeLatencyHidingC);
} else {
ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
- v->Watermark.DRAMClockChangeWatermark;
ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
- v->Watermark.FCLKChangeWatermark;
USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
if (v->WritebackEnable[k]) {
WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
/ (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64)
WritebackLatencyHiding = WritebackLatencyHiding / 2;
WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- v->Watermark.WritebackDRAMClockChangeWatermark;
WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
WritebackFCLKChangeLatencyMargin);
ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
(v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
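	// Surfaces count as synchronized when they share a blending/timing master, have
	// identical timings with SynchronizeTimingsFinal set, or either one is a DRR display
	// with DRR UCLK p-state synchronization enabled.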
for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
(v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
(v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
(v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
(v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
(v->DRRDisplay[i] || v->DRRDisplay[j]))) {
SynchronizedSurfaces[i][j] = true;
} else {
SynchronizedSurfaces[i][j] = false;
}
}
}
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
*MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
SameTimingForFCLKChange = true;
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(SameTimingForFCLKChange ||
ActiveFCLKChangeLatencyMargin[k] <
SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
SameTimingForFCLKChange = false;
}
}
if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
} else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
*FCLKChangeSupport = dm_fclock_change_unsupported;
}
*USRRetrainingSupport = true;
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
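	// DRAMClockChangeSupportNumber: 0 = every surface hides the latency in vactive,
	// 1 = a single surface (or synchronized group) needs vblank, 2 = unsupported; any
	// negative margin with PrefetchMode > 0 forces 2.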
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
DRAMClockChangeSupportNumber = 2;
} else if (DRAMClockChangeSupportNumber == 0) {
DRAMClockChangeSupportNumber = 1;
LastSurfaceWithoutMargin = k;
} else if (DRAMClockChangeSupportNumber == 1 &&
!SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
DRAMClockChangeSupportNumber = 2;
}
}
}
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
DRAMClockChangeMethod = 1;
else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
DRAMClockChangeMethod = 2;
}
if (DRAMClockChangeMethod == 0) {
if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else if (DRAMClockChangeMethod == 1) {
if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DRAMClockChangeSupport = %d\n", __func__, *DRAMClockChangeSupport);
dml_print("DML::%s: FCLKChangeSupport = %d\n", __func__, *FCLKChangeSupport);
dml_print("DML::%s: MinActiveFCLKChangeLatencySupported = %f\n",
__func__, *MinActiveFCLKChangeLatencySupported);
dml_print("DML::%s: USRRetrainingSupport = %d\n", __func__, *USRRetrainingSupport);
#endif
} // CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport
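
/*
 * dml32_CalculateWriteBackDISPCLK: minimum DISPCLK required by the writeback
 * path. The limiting factor is the worst of the horizontal scaling throughput
 * (DISPCLK_H), the vertical scaling throughput (DISPCLK_V) and the
 * horizontal-blank/line-buffer term (DISPCLK_HB); the result is rounded up to
 * the DFS granularity derived from DISPCLKDPPCLKVCOSpeed.
 */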
double dml32_CalculateWriteBackDISPCLK(
enum source_format_class WritebackPixelFormat,
double PixelClock,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackHTaps,
unsigned int WritebackVTaps,
unsigned int WritebackSourceWidth,
unsigned int WritebackDestinationWidth,
unsigned int HTotal,
unsigned int WritebackLineBufferSize,
double DISPCLKDPPCLKVCOSpeed)
{
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
DISPCLK_V = PixelClock * (WritebackVTaps * dml_ceil(WritebackDestinationWidth / 6.0, 1) + 8.0) / HTotal;
DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth *
WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / WritebackSourceWidth;
return dml32_RoundToDFSGranularity(dml_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB), 1, DISPCLKDPPCLKVCOSpeed);
}
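
/*
 * dml32_CalculateMinAndMaxPrefetchMode: translate the allowed p-state/stutter
 * behaviour in vblank into the prefetch mode range to sweep:
 * 0 = UCLK + FCLK + stutter, 1 = FCLK + stutter, 2 = stutter only, 3 = none.
 * "if_possible" (and any unrecognized value) opens the full 0..3 range.
 */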
void dml32_CalculateMinAndMaxPrefetchMode(
enum dm_prefetch_modes AllowForPStateChangeOrStutterInVBlankFinal,
unsigned int *MinPrefetchMode,
unsigned int *MaxPrefetchMode)
{
if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_none) {
*MinPrefetchMode = 3;
*MaxPrefetchMode = 3;
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_stutter) {
*MinPrefetchMode = 2;
*MaxPrefetchMode = 2;
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_fclk_and_stutter) {
*MinPrefetchMode = 1;
*MaxPrefetchMode = 1;
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
} else if (AllowForPStateChangeOrStutterInVBlankFinal ==
dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
}
} // CalculateMinAndMaxPrefetchMode
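
/*
 * dml32_CalculatePixelDeliveryTimes: per-surface swath delivery budgets.
 * A line delivery time is display-limited (swath_width * DPPs / HRatio /
 * PixelClock) when the vertical ratio is <= 1, otherwise scaler-limited
 * (swath_width / PSCL throughput / Dppclk). Request delivery times divide
 * that by the number of 256B requests per swath (block width or height,
 * depending on rotation), and cursor delivery times use the number of
 * cursor requests per cursor line. Prefetch variants use the prefetch ratios.
 */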
void dml32_CalculatePixelDeliveryTimes(
unsigned int NumberOfActiveSurfaces,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerSurface[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double Dppclk[],
unsigned int BytePerPixelC[],
enum dm_rotation_angle SourceRotation[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][DC__NUM_CURSOR__MAX],
unsigned int CursorBPP[][DC__NUM_CURSOR__MAX],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
/* Output */
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[])
{
double req_per_swath_ub;
unsigned int k;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : HRatio = %f\n", __func__, k, HRatio[k]);
dml_print("DML::%s: k=%d : VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%d : HRatioChroma = %f\n", __func__, k, HRatioChroma[k]);
dml_print("DML::%s: k=%d : VRatioChroma = %f\n", __func__, k, VRatioChroma[k]);
dml_print("DML::%s: k=%d : swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%d : swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%d : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
dml_print("DML::%s: k=%d : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
dml_print("DML::%s: k=%d : DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d : PixelClock = %f\n", __func__, k, PixelClock[k]);
dml_print("DML::%s: k=%d : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] =
swath_width_luma_ub[k] * DPPPerSurface[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma[k] = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma[k] =
swath_width_chroma_ub[k] * DPPPerSurface[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma[k] =
swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
}
}
if (VRatioPrefetchY[k] <= 1) {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
swath_width_luma_ub[k] * DPPPerSurface[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (VRatioPrefetchC[k] <= 1) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] *
DPPPerSurface[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
}
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLuma = %f\n",
__func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n",
__func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChroma = %f\n",
__func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n",
__func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!IsVertical(SourceRotation[k]))
req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
else
req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : req_per_swath_ub = %f (Luma)\n", __func__, k, req_per_swath_ub);
#endif
DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeLumaPrefetch[k] =
DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
if (BytePerPixelC[k] == 0) {
DisplayPipeRequestDeliveryTimeChroma[k] = 0;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (!IsVertical(SourceRotation[k]))
req_per_swath_ub = swath_width_chroma_ub[k] / BlockWidth256BytesC[k];
else
req_per_swath_ub = swath_width_chroma_ub[k] / BlockHeight256BytesC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : req_per_swath_ub = %f (Chroma)\n", __func__, k, req_per_swath_ub);
#endif
DisplayPipeRequestDeliveryTimeChroma[k] =
DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] =
DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLuma = %f\n",
__func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n",
__func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChroma = %f\n",
__func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n",
__func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
#endif
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
unsigned int cursor_req_per_width;
cursor_req_per_width = dml_ceil((double) CursorWidth[k][0] * (double) CursorBPP[k][0] /
256.0 / 8.0, 1.0);
if (NumberOfCursors[k] > 0) {
if (VRatio[k] <= 1) {
CursorRequestDeliveryTime[k] = (double) CursorWidth[k][0] /
HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTime[k] = (double) CursorWidth[k][0] /
PSCL_THROUGHPUT[k] / Dppclk[k] / cursor_req_per_width;
}
if (VRatioPrefetchY[k] <= 1) {
CursorRequestDeliveryTimePrefetch[k] = (double) CursorWidth[k][0] /
HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTimePrefetch[k] = (double) CursorWidth[k][0] /
PSCL_THROUGHPUT[k] / Dppclk[k] / cursor_req_per_width;
}
} else {
CursorRequestDeliveryTime[k] = 0;
CursorRequestDeliveryTimePrefetch[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : NumberOfCursors = %d\n",
__func__, k, NumberOfCursors[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTime = %f\n",
__func__, k, CursorRequestDeliveryTime[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTimePrefetch = %f\n",
__func__, k, CursorRequestDeliveryTimePrefetch[k]);
#endif
}
} // CalculatePixelDeliveryTimes
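
/*
 * dml32_CalculateMetaAndPTETimes: pacing for DCC metadata and PTE fetches.
 * DST_Y_PER_*_ROW_NOM gives the display lines covered by one nominal
 * PTE/meta row. Meta chunks per row round up with a +1/+2 remainder rule;
 * dpte groups per row round up and are halved when one PTE row covers the
 * whole frame. Those counts turn the nominal, vblank and immediate-flip
 * row-request windows into per-chunk and per-group time budgets.
 */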
void dml32_CalculateMetaAndPTETimes(
bool use_one_row_for_frame[],
unsigned int NumberOfActiveSurfaces,
bool GPUVMEnable,
unsigned int MetaChunkSize,
unsigned int MinMetaChunkSizeBytes,
unsigned int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
unsigned int BytePerPixelY[],
unsigned int BytePerPixelC[],
enum dm_rotation_angle SourceRotation[],
unsigned int dpte_row_height[],
unsigned int dpte_row_height_chroma[],
unsigned int meta_row_width[],
unsigned int meta_row_width_chroma[],
unsigned int meta_row_height[],
unsigned int meta_row_height_chroma[],
unsigned int meta_req_width[],
unsigned int meta_req_width_chroma[],
unsigned int meta_req_height[],
unsigned int meta_req_height_chroma[],
unsigned int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
unsigned int PixelPTEReqHeightY[],
unsigned int PixelPTEReqWidthC[],
unsigned int PixelPTEReqHeightC[],
unsigned int dpte_row_width_luma_ub[],
unsigned int dpte_row_width_chroma_ub[],
/* Output */
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[])
{
unsigned int meta_chunk_width;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_chunks_per_row_ub;
unsigned int meta_chunk_width_chroma;
unsigned int min_meta_chunk_width_chroma;
unsigned int meta_chunk_per_row_int_chroma;
unsigned int meta_row_remainder_chroma;
unsigned int meta_chunk_threshold_chroma;
unsigned int meta_chunks_per_row_ub_chroma;
unsigned int dpte_group_width_luma;
unsigned int dpte_groups_per_row_luma_ub;
unsigned int dpte_group_width_chroma;
unsigned int dpte_groups_per_row_chroma_ub;
unsigned int k;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0)
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
else
DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / VRatioChroma[k];
DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0)
DST_Y_PER_META_ROW_NOM_C[k] = 0;
else
DST_Y_PER_META_ROW_NOM_C[k] = meta_row_height_chroma[k] / VRatioChroma[k];
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (DCCEnable[k] == true) {
meta_chunk_width = MetaChunkSize * 1024 * 256 / BytePerPixelY[k] / meta_row_height[k];
min_meta_chunk_width = MinMetaChunkSizeBytes * 256 / BytePerPixelY[k] / meta_row_height[k];
meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
if (!IsVertical(SourceRotation[k]))
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height[k];
if (meta_row_remainder <= meta_chunk_threshold)
meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
if (BytePerPixelC[k] == 0) {
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
} else {
meta_chunk_width_chroma = MetaChunkSize * 1024 * 256 / BytePerPixelC[k] /
meta_row_height_chroma[k];
min_meta_chunk_width_chroma = MinMetaChunkSizeBytes * 256 / BytePerPixelC[k] /
meta_row_height_chroma[k];
meta_chunk_per_row_int_chroma = (double) meta_row_width_chroma[k] /
meta_chunk_width_chroma;
meta_row_remainder_chroma = meta_row_width_chroma[k] % meta_chunk_width_chroma;
if (!IsVertical(SourceRotation[k])) {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma -
meta_req_width_chroma[k];
} else {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma -
meta_req_height_chroma[k];
}
if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma)
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
else
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
TimePerChromaMetaChunkNominal[k] = meta_row_height_chroma[k] / VRatioChroma[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
}
} else {
TimePerMetaChunkNominal[k] = 0;
TimePerMetaChunkVBlank[k] = 0;
TimePerMetaChunkFlip[k] = 0;
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (GPUVMEnable == true) {
if (!IsVertical(SourceRotation[k])) {
dpte_group_width_luma = (double) dpte_group_bytes[k] /
(double) PTERequestSizeY[k] * PixelPTEReqWidthY[k];
} else {
dpte_group_width_luma = (double) dpte_group_bytes[k] /
(double) PTERequestSizeY[k] * PixelPTEReqHeightY[k];
}
if (use_one_row_for_frame[k]) {
dpte_groups_per_row_luma_ub = dml_ceil((double) dpte_row_width_luma_ub[k] /
(double) dpte_group_width_luma / 2.0, 1.0);
} else {
dpte_groups_per_row_luma_ub = dml_ceil((double) dpte_row_width_luma_ub[k] /
(double) dpte_group_width_luma, 1.0);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, use_one_row_for_frame = %d\n",
__func__, k, use_one_row_for_frame[k]);
dml_print("DML::%s: k=%0d, dpte_group_bytes = %d\n",
__func__, k, dpte_group_bytes[k]);
dml_print("DML::%s: k=%0d, PTERequestSizeY = %d\n",
__func__, k, PTERequestSizeY[k]);
dml_print("DML::%s: k=%0d, PixelPTEReqWidthY = %d\n",
__func__, k, PixelPTEReqWidthY[k]);
dml_print("DML::%s: k=%0d, PixelPTEReqHeightY = %d\n",
__func__, k, PixelPTEReqHeightY[k]);
dml_print("DML::%s: k=%0d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
dml_print("DML::%s: k=%0d, dpte_group_width_luma = %d\n",
__func__, k, dpte_group_width_luma);
dml_print("DML::%s: k=%0d, dpte_groups_per_row_luma_ub = %d\n",
__func__, k, dpte_groups_per_row_luma_ub);
#endif
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_flip_luma[k] = DestinationLinesToRequestRowInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
if (BytePerPixelC[k] == 0) {
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
} else {
if (!IsVertical(SourceRotation[k])) {
dpte_group_width_chroma = (double) dpte_group_bytes[k] /
(double) PTERequestSizeC[k] * PixelPTEReqWidthC[k];
} else {
dpte_group_width_chroma = (double) dpte_group_bytes[k] /
(double) PTERequestSizeC[k] * PixelPTEReqHeightC[k];
}
if (use_one_row_for_frame[k]) {
dpte_groups_per_row_chroma_ub = dml_ceil((double) dpte_row_width_chroma_ub[k] /
(double) dpte_group_width_chroma / 2.0, 1.0);
} else {
dpte_groups_per_row_chroma_ub = dml_ceil((double) dpte_row_width_chroma_ub[k] /
(double) dpte_group_width_chroma, 1.0);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d, dpte_group_width_chroma = %d\n",
__func__, k, dpte_group_width_chroma);
dml_print("DML::%s: k=%0d, dpte_groups_per_row_chroma_ub = %d\n",
__func__, k, dpte_groups_per_row_chroma_ub);
#endif
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_vblank_chroma[k] = DestinationLinesToRequestRowInVBlank[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_flip_chroma[k] = DestinationLinesToRequestRowInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
}
} else {
time_per_pte_group_nom_luma[k] = 0;
time_per_pte_group_vblank_luma[k] = 0;
time_per_pte_group_flip_luma[k] = 0;
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, DestinationLinesToRequestRowInVBlank = %f\n",
__func__, k, DestinationLinesToRequestRowInVBlank[k]);
dml_print("DML::%s: k=%0d, DestinationLinesToRequestRowInImmediateFlip = %f\n",
__func__, k, DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: k=%0d, DST_Y_PER_PTE_ROW_NOM_L = %f\n",
__func__, k, DST_Y_PER_PTE_ROW_NOM_L[k]);
dml_print("DML::%s: k=%0d, DST_Y_PER_PTE_ROW_NOM_C = %f\n",
__func__, k, DST_Y_PER_PTE_ROW_NOM_C[k]);
dml_print("DML::%s: k=%0d, DST_Y_PER_META_ROW_NOM_L = %f\n",
__func__, k, DST_Y_PER_META_ROW_NOM_L[k]);
dml_print("DML::%s: k=%0d, DST_Y_PER_META_ROW_NOM_C = %f\n",
__func__, k, DST_Y_PER_META_ROW_NOM_C[k]);
dml_print("DML::%s: k=%0d, TimePerMetaChunkNominal = %f\n",
__func__, k, TimePerMetaChunkNominal[k]);
dml_print("DML::%s: k=%0d, TimePerMetaChunkVBlank = %f\n",
__func__, k, TimePerMetaChunkVBlank[k]);
dml_print("DML::%s: k=%0d, TimePerMetaChunkFlip = %f\n",
__func__, k, TimePerMetaChunkFlip[k]);
dml_print("DML::%s: k=%0d, TimePerChromaMetaChunkNominal = %f\n",
__func__, k, TimePerChromaMetaChunkNominal[k]);
dml_print("DML::%s: k=%0d, TimePerChromaMetaChunkVBlank = %f\n",
__func__, k, TimePerChromaMetaChunkVBlank[k]);
dml_print("DML::%s: k=%0d, TimePerChromaMetaChunkFlip = %f\n",
__func__, k, TimePerChromaMetaChunkFlip[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_nom_luma = %f\n",
__func__, k, time_per_pte_group_nom_luma[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_vblank_luma = %f\n",
__func__, k, time_per_pte_group_vblank_luma[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_flip_luma = %f\n",
__func__, k, time_per_pte_group_flip_luma[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_nom_chroma = %f\n",
__func__, k, time_per_pte_group_nom_chroma[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_vblank_chroma = %f\n",
__func__, k, time_per_pte_group_vblank_chroma[k]);
dml_print("DML::%s: k=%0d, time_per_pte_group_flip_chroma = %f\n",
__func__, k, time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
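
/*
 * dml32_CalculateVMGroupAndRequestTimes: pacing for GPUVM prefetches. When
 * GPUVM is enabled and there is a lower page-table stage to fetch (DCC meta
 * PTEs or more than one page-table level), count the VM groups and 64-byte
 * VM requests needed per surface and spread the vblank / immediate-flip VM
 * request windows across them. With more than two page-table levels the
 * per-group and per-request times are halved.
 */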
void dml32_CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActiveSurfaces,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
unsigned int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
unsigned int dpte_row_width_luma_ub[],
unsigned int dpte_row_width_chroma_ub[],
unsigned int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
unsigned int meta_pte_bytes_per_frame_ub_l[],
unsigned int meta_pte_bytes_per_frame_ub_c[],
/* Output */
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
unsigned int k;
unsigned int num_group_per_lower_vm_stage;
unsigned int num_req_per_lower_vm_stage;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, NumberOfActiveSurfaces);
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, DCCEnable = %d\n", __func__, k, DCCEnable[k]);
dml_print("DML::%s: k=%0d, vm_group_bytes = %d\n", __func__, k, vm_group_bytes[k]);
dml_print("DML::%s: k=%0d, dpde0_bytes_per_frame_ub_l = %d\n",
__func__, k, dpde0_bytes_per_frame_ub_l[k]);
dml_print("DML::%s: k=%0d, dpde0_bytes_per_frame_ub_c = %d\n",
__func__, k, dpde0_bytes_per_frame_ub_c[k]);
dml_print("DML::%s: k=%0d, meta_pte_bytes_per_frame_ub_l = %d\n",
__func__, k, meta_pte_bytes_per_frame_ub_l[k]);
dml_print("DML::%s: k=%0d, meta_pte_bytes_per_frame_ub_c = %d\n",
__func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil(
(double) (dpde0_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1.0) +
dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) /
(double) (vm_group_bytes[k]), 1.0);
} else {
num_group_per_lower_vm_stage = dml_ceil(
(double) (dpde0_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1.0);
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil(
(double) (meta_pte_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1.0) +
dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) /
(double) (vm_group_bytes[k]), 1.0);
} else {
num_group_per_lower_vm_stage = dml_ceil(
(double) (meta_pte_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1.0);
}
} else {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = 2 + dml_ceil(
(double) (dpde0_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1) +
dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) /
(double) (vm_group_bytes[k]), 1) +
dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1) +
dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) /
(double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = 1 + dml_ceil(
(double) (dpde0_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1) + dml_ceil(
(double) (meta_pte_bytes_per_frame_ub_l[k]) /
(double) (vm_group_bytes[k]), 1);
}
}
}
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 +
dpde0_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64 +
meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] /
64 + dpde0_bytes_per_frame_ub_c[k] / 64 +
meta_pte_bytes_per_frame_ub_l[k] / 64 +
meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] /
64 + meta_pte_bytes_per_frame_ub_l[k] / 64;
}
}
}
TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] *
HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k] *
HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] *
HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
if (GPUVMMaxPageTableLevels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
}
} else {
TimePerVMGroupVBlank[k] = 0;
TimePerVMGroupFlip[k] = 0;
TimePerVMRequestVBlank[k] = 0;
TimePerVMRequestFlip[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
dml_print("DML::%s: k=%0d, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
dml_print("DML::%s: k=%0d, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
dml_print("DML::%s: k=%0d, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
} // CalculateVMGroupAndRequestTimes
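
/*
 * dml32_CalculateDCCConfiguration: derive the DCC request and block sizes.
 * The effective viewport is bounded by the detile buffer and MAS limits;
 * based on whether full luma/chroma swaths (in each scan direction) still
 * fit in the DET, requests stay at 256B or fall back to 128B contiguous or
 * non-contiguous. The resulting request type sets the maximum uncompressed
 * and compressed block sizes and the independent block size per plane;
 * everything is zeroed when DCC is disabled or the plane is absent.
 */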
void dml32_CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
unsigned int nomDETInKByte,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum dm_rotation_angle SourceRotation,
/* Output */
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
typedef enum {
REQ_256Bytes,
REQ_128BytesNonContiguous,
REQ_128BytesContiguous,
REQ_NA
} RequestType;
RequestType RequestLuma;
RequestType RequestChroma;
unsigned int segment_order_horz_contiguous_luma;
unsigned int segment_order_horz_contiguous_chroma;
unsigned int segment_order_vert_contiguous_luma;
unsigned int segment_order_vert_contiguous_chroma;
unsigned int req128_horz_wc_l;
unsigned int req128_horz_wc_c;
unsigned int req128_vert_wc_l;
unsigned int req128_vert_wc_c;
unsigned int MAS_vp_horz_limit;
unsigned int MAS_vp_vert_limit;
unsigned int max_vp_horz_width;
unsigned int max_vp_vert_height;
unsigned int eff_surf_width_l;
unsigned int eff_surf_width_c;
unsigned int eff_surf_height_l;
unsigned int eff_surf_height_c;
unsigned int full_swath_bytes_horz_wc_l;
unsigned int full_swath_bytes_horz_wc_c;
unsigned int full_swath_bytes_vert_wc_l;
unsigned int full_swath_bytes_vert_wc_c;
unsigned int DETBufferSizeForDCC = nomDETInKByte * 1024;
unsigned int yuv420;
unsigned int horz_div_l;
unsigned int horz_div_c;
unsigned int vert_div_l;
unsigned int vert_div_c;
unsigned int swath_buf_size;
double detile_buf_vp_horz_limit;
double detile_buf_vp_vert_limit;
yuv420 = ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 ||
SourcePixelFormat == dm_420_12) ? 1 : 0);
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
vert_div_c = 1;
if (BytePerPixelY == 1)
vert_div_l = 0;
if (BytePerPixelC == 1)
vert_div_c = 0;
if (BytePerPixelC == 0) {
swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size / ((double) RequestHeight256ByteLuma *
BytePerPixelY / (1 + horz_div_l));
detile_buf_vp_vert_limit = (double) swath_buf_size / (256.0 / RequestHeight256ByteLuma /
(1 + vert_div_l));
} else {
swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size / ((double) RequestHeight256ByteLuma *
BytePerPixelY / (1 + horz_div_l) + (double) RequestHeight256ByteChroma *
BytePerPixelC / (1 + horz_div_c) / (1 + yuv420));
detile_buf_vp_vert_limit = (double) swath_buf_size / (256.0 / RequestHeight256ByteLuma /
(1 + vert_div_l) + 256.0 / RequestHeight256ByteChroma /
(1 + vert_div_c) / (1 + yuv420));
}
if (SourcePixelFormat == dm_420_10) {
detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
}
detile_buf_vp_horz_limit = dml_floor(detile_buf_vp_horz_limit - 1, 16);
detile_buf_vp_vert_limit = dml_floor(detile_buf_vp_vert_limit - 1, 16);
MAS_vp_horz_limit = SourcePixelFormat == dm_rgbe_alpha ? 3840 : 6144;
MAS_vp_vert_limit = SourcePixelFormat == dm_rgbe_alpha ? 3840 : (BytePerPixelY == 8 ? 3072 : 6144);
max_vp_horz_width = dml_min((double) MAS_vp_horz_limit, detile_buf_vp_horz_limit);
max_vp_vert_height = dml_min((double) MAS_vp_vert_limit, detile_buf_vp_vert_limit);
eff_surf_width_l = (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
eff_surf_height_l = (SurfaceHeightLuma > max_vp_vert_height ? max_vp_vert_height : SurfaceHeightLuma);
eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
if (BytePerPixelC > 0) {
full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma * BytePerPixelC;
full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
} else {
full_swath_bytes_horz_wc_c = 0;
full_swath_bytes_vert_wc_c = 0;
}
if (SourcePixelFormat == dm_420_10) {
full_swath_bytes_horz_wc_l = dml_ceil((double) full_swath_bytes_horz_wc_l * 2.0 / 3.0, 256.0);
full_swath_bytes_horz_wc_c = dml_ceil((double) full_swath_bytes_horz_wc_c * 2.0 / 3.0, 256.0);
full_swath_bytes_vert_wc_l = dml_ceil((double) full_swath_bytes_vert_wc_l * 2.0 / 3.0, 256.0);
full_swath_bytes_vert_wc_c = dml_ceil((double) full_swath_bytes_vert_wc_c * 2.0 / 3.0, 256.0);
}
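/*
 * Pick the request size per scan direction: keep 256B requests for both
 * planes if two full luma and two full chroma swaths fit in the DET,
 * otherwise use the 1.5x luma/chroma ratio test to drop only one plane to
 * 128B when that makes the pair fit, and drop both planes to 128B if not.
 */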
if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 0;
} else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c && 2 * full_swath_bytes_horz_wc_l +
full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 1;
} else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c && full_swath_bytes_horz_wc_l + 2 *
full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
req128_horz_wc_l = 1;
req128_horz_wc_c = 0;
} else {
req128_horz_wc_l = 1;
req128_horz_wc_c = 1;
}
if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 0;
} else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c && 2 *
full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 1;
} else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c &&
full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
req128_vert_wc_l = 1;
req128_vert_wc_c = 0;
} else {
req128_vert_wc_l = 1;
req128_vert_wc_c = 1;
}
if (BytePerPixelY == 2) {
segment_order_horz_contiguous_luma = 0;
segment_order_vert_contiguous_luma = 1;
} else {
segment_order_horz_contiguous_luma = 1;
segment_order_vert_contiguous_luma = 0;
}
if (BytePerPixelC == 2) {
segment_order_horz_contiguous_chroma = 0;
segment_order_vert_contiguous_chroma = 1;
} else {
segment_order_horz_contiguous_chroma = 1;
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DCCEnabled = %d\n", __func__, DCCEnabled);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: DETBufferSizeForDCC = %d\n", __func__, DETBufferSizeForDCC);
dml_print("DML::%s: req128_horz_wc_l = %d\n", __func__, req128_horz_wc_l);
dml_print("DML::%s: req128_horz_wc_c = %d\n", __func__, req128_horz_wc_c);
dml_print("DML::%s: full_swath_bytes_horz_wc_l = %d\n", __func__, full_swath_bytes_horz_wc_l);
dml_print("DML::%s: full_swath_bytes_vert_wc_c = %d\n", __func__, full_swath_bytes_vert_wc_c);
dml_print("DML::%s: segment_order_horz_contiguous_luma = %d\n", __func__, segment_order_horz_contiguous_luma);
dml_print("DML::%s: segment_order_horz_contiguous_chroma = %d\n",
__func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0)
RequestLuma = REQ_256Bytes;
else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0) ||
(req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0))
RequestLuma = REQ_128BytesNonContiguous;
else
RequestLuma = REQ_128BytesContiguous;
if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0)
RequestChroma = REQ_256Bytes;
else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0) ||
(req128_vert_wc_c == 1 && segment_order_vert_contiguous_chroma == 0))
RequestChroma = REQ_128BytesNonContiguous;
else
RequestChroma = REQ_128BytesContiguous;
} else if (!IsVertical(SourceRotation)) {
if (req128_horz_wc_l == 0)
RequestLuma = REQ_256Bytes;
else if (segment_order_horz_contiguous_luma == 0)
RequestLuma = REQ_128BytesNonContiguous;
else
RequestLuma = REQ_128BytesContiguous;
if (req128_horz_wc_c == 0)
RequestChroma = REQ_256Bytes;
else if (segment_order_horz_contiguous_chroma == 0)
RequestChroma = REQ_128BytesNonContiguous;
else
RequestChroma = REQ_128BytesContiguous;
} else {
if (req128_vert_wc_l == 0)
RequestLuma = REQ_256Bytes;
else if (segment_order_vert_contiguous_luma == 0)
RequestLuma = REQ_128BytesNonContiguous;
else
RequestLuma = REQ_128BytesContiguous;
if (req128_vert_wc_c == 0)
RequestChroma = REQ_256Bytes;
else if (segment_order_vert_contiguous_chroma == 0)
RequestChroma = REQ_128BytesNonContiguous;
else
RequestChroma = REQ_128BytesContiguous;
}
if (RequestLuma == REQ_256Bytes) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 256;
*IndependentBlockLuma = 0;
} else if (RequestLuma == REQ_128BytesContiguous) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 128;
*IndependentBlockLuma = 128;
} else {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 64;
*IndependentBlockLuma = 64;
}
if (RequestChroma == REQ_256Bytes) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 256;
*IndependentBlockChroma = 0;
} else if (RequestChroma == REQ_128BytesContiguous) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 128;
*IndependentBlockChroma = 128;
} else {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 64;
*IndependentBlockChroma = 64;
}
if (DCCEnabled != true || BytePerPixelC == 0) {
*MaxUncompressedBlockChroma = 0;
*MaxCompressedBlockChroma = 0;
*IndependentBlockChroma = 0;
}
if (DCCEnabled != true) {
*MaxUncompressedBlockLuma = 0;
*MaxCompressedBlockLuma = 0;
*IndependentBlockLuma = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MaxUncompressedBlockLuma = %d\n", __func__, *MaxUncompressedBlockLuma);
dml_print("DML::%s: MaxCompressedBlockLuma = %d\n", __func__, *MaxCompressedBlockLuma);
dml_print("DML::%s: IndependentBlockLuma = %d\n", __func__, *IndependentBlockLuma);
dml_print("DML::%s: MaxUncompressedBlockChroma = %d\n", __func__, *MaxUncompressedBlockChroma);
dml_print("DML::%s: MaxCompressedBlockChroma = %d\n", __func__, *MaxCompressedBlockChroma);
dml_print("DML::%s: IndependentBlockChroma = %d\n", __func__, *IndependentBlockChroma);
#endif
} // CalculateDCCConfiguration
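
/*
 * dml32_CalculateStutterEfficiency: estimate how long DCHUB can stay in
 * stutter. The average DCC compression rate and the effective compressed
 * buffer size are built from the per-surface read bandwidths; the critical
 * surface (smallest DET buffering time) defines the stutter period. The
 * burst time needed to refill the buffers is compared against that period
 * to produce efficiencies with and without vblank for both the normal and
 * Z8 self-refresh exit latencies, and the DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE
 * flag is derived from the critical-surface swath and chunk sizes.
 */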
void dml32_CalculateStutterEfficiency(
unsigned int CompressedBufferSizeInkByte,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
bool UnboundedRequestEnabled,
unsigned int MetaFIFOSizeInKEntries,
unsigned int ZeroSizeBufferEntries,
unsigned int PixelChunkSizeInKByte,
unsigned int NumberOfActiveSurfaces,
unsigned int ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
unsigned int CompbufReservedSpace64B,
unsigned int CompbufReservedSpaceZs,
double SRExitTime,
double SRExitZ8Time,
bool SynchronizeTimingsFinal,
unsigned int BlendingAndTiming[],
double StutterEnterPlusExitWatermark,
double Z8StutterEnterPlusExitWatermark,
bool ProgressiveToInterlaceUnitInOPP,
bool Interlace[],
double MinTTUVBlank[],
unsigned int DPPPerSurface[],
unsigned int DETBufferSizeY[],
unsigned int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double NetDCCRateLuma[],
double NetDCCRateChroma[],
double DCCFractionOfZeroSizeRequestsLuma[],
double DCCFractionOfZeroSizeRequestsChroma[],
unsigned int HTotal[],
unsigned int VTotal[],
double PixelClock[],
double VRatio[],
enum dm_rotation_angle SourceRotation[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesC[],
unsigned int BlockWidth256BytesC[],
unsigned int DCCYMaxUncompressedBlock[],
unsigned int DCCCMaxUncompressedBlock[],
unsigned int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthSurfaceLuma[],
double ReadBandwidthSurfaceChroma[],
double meta_row_bw[],
double dpte_row_bw[],
/* Output */
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
unsigned int *NumberOfStutterBurstsPerFrame,
double *Z8StutterEfficiencyNotIncludingVBlank,
double *Z8StutterEfficiency,
unsigned int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod,
bool *DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE)
{
bool FoundCriticalSurface = false;
unsigned int SwathSizeCriticalSurface = 0;
unsigned int LastChunkOfSwathSize;
unsigned int MissingPartOfLastSwathOfDETSize;
double LastZ8StutterPeriod = 0.0;
double LastStutterPeriod = 0.0;
unsigned int TotalNumberOfActiveOTG = 0;
double doublePixelClock;
unsigned int doubleHTotal;
unsigned int doubleVTotal;
bool SameTiming = true;
double DETBufferingTimeY;
double SwathWidthYCriticalSurface = 0.0;
double SwathHeightYCriticalSurface = 0.0;
double VActiveTimeCriticalSurface = 0.0;
double FrameTimeCriticalSurface = 0.0;
unsigned int BytePerPixelYCriticalSurface = 0;
double LinesToFinishSwathTransferStutterCriticalSurface = 0.0;
unsigned int DETBufferSizeYCriticalSurface = 0;
double MinTTUVBlankCriticalSurface = 0.0;
unsigned int BlockWidth256BytesYCriticalSurface = 0;
bool doublePlaneCriticalSurface = false;
bool doublePipeCriticalSurface = false;
double TotalCompressedReadBandwidth;
double TotalRowReadBandwidth;
double AverageDCCCompressionRate;
double EffectiveCompressedBufferSize;
double PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer;
double StutterBurstTime;
unsigned int TotalActiveWriteback;
double LinesInDETY;
double LinesInDETYRoundedDownToSwath;
double MaximumEffectiveCompressionLuma;
double MaximumEffectiveCompressionChroma;
double TotalZeroSizeRequestReadBandwidth;
double TotalZeroSizeCompressedReadBandwidth;
double AverageDCCZeroSizeFraction;
double AverageZeroSizeCompressionRate;
unsigned int k;
TotalZeroSizeRequestReadBandwidth = 0;
TotalZeroSizeCompressedReadBandwidth = 0;
TotalRowReadBandwidth = 0;
TotalCompressedReadBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) {
if (DCCEnable[k] == true) {
if ((IsVertical(SourceRotation[k]) && BlockWidth256BytesY[k] > SwathHeightY[k])
|| (!IsVertical(SourceRotation[k])
&& BlockHeight256BytesY[k] > SwathHeightY[k])
|| DCCYMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionLuma = 2;
} else {
MaximumEffectiveCompressionLuma = 4;
}
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth
+ ReadBandwidthSurfaceLuma[k]
/ dml_min(NetDCCRateLuma[k],
MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, ReadBandwidthSurfaceLuma = %f\n",
__func__, k, ReadBandwidthSurfaceLuma[k]);
dml_print("DML::%s: k=%0d, NetDCCRateLuma = %f\n",
__func__, k, NetDCCRateLuma[k]);
dml_print("DML::%s: k=%0d, MaximumEffectiveCompressionLuma = %f\n",
__func__, k, MaximumEffectiveCompressionLuma);
#endif
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth
+ ReadBandwidthSurfaceLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthSurfaceLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k]
/ MaximumEffectiveCompressionLuma;
if (ReadBandwidthSurfaceChroma[k] > 0) {
if ((IsVertical(SourceRotation[k]) && BlockWidth256BytesC[k] > SwathHeightC[k])
|| (!IsVertical(SourceRotation[k])
&& BlockHeight256BytesC[k] > SwathHeightC[k])
|| DCCCMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionChroma = 2;
} else {
MaximumEffectiveCompressionChroma = 4;
}
TotalCompressedReadBandwidth =
TotalCompressedReadBandwidth
+ ReadBandwidthSurfaceChroma[k]
/ dml_min(NetDCCRateChroma[k],
MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, ReadBandwidthSurfaceChroma = %f\n",
__func__, k, ReadBandwidthSurfaceChroma[k]);
dml_print("DML::%s: k=%0d, NetDCCRateChroma = %f\n",
__func__, k, NetDCCRateChroma[k]);
dml_print("DML::%s: k=%0d, MaximumEffectiveCompressionChroma = %f\n",
__func__, k, MaximumEffectiveCompressionChroma);
#endif
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth
+ ReadBandwidthSurfaceChroma[k]
* DCCFractionOfZeroSizeRequestsChroma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthSurfaceChroma[k]
* DCCFractionOfZeroSizeRequestsChroma[k]
/ MaximumEffectiveCompressionChroma;
}
} else {
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth
+ ReadBandwidthSurfaceLuma[k] + ReadBandwidthSurfaceChroma[k];
}
TotalRowReadBandwidth = TotalRowReadBandwidth
+ DPPPerSurface[k] * (meta_row_bw[k] + dpte_row_bw[k]);
}
}
AverageDCCCompressionRate = TotalDataReadBandwidth / TotalCompressedReadBandwidth;
AverageDCCZeroSizeFraction = TotalZeroSizeRequestReadBandwidth / TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UnboundedRequestEnabled = %d\n", __func__, UnboundedRequestEnabled);
dml_print("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, TotalCompressedReadBandwidth);
dml_print("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, TotalZeroSizeRequestReadBandwidth);
dml_print("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n",
__func__, TotalZeroSizeCompressedReadBandwidth);
dml_print("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, MaximumEffectiveCompressionLuma);
dml_print("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, MaximumEffectiveCompressionChroma);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, AverageDCCZeroSizeFraction);
dml_print("DML::%s: CompbufReservedSpace64B = %d\n", __func__, CompbufReservedSpace64B);
dml_print("DML::%s: CompbufReservedSpaceZs = %d\n", __func__, CompbufReservedSpaceZs);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, CompressedBufferSizeInkByte);
#endif
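/*
 * Effective compressed buffer size: how much uncompressed pixel data the
 * metadata FIFO, zero-size-request entries, compressed buffer and ROB can
 * hold together. Which terms contribute depends on whether all, some or
 * none of the DCC traffic consists of zero-size requests.
 */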
if (AverageDCCZeroSizeFraction == 1) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth
/ TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = (double) MetaFIFOSizeInKEntries * 1024 * 64
* AverageZeroSizeCompressionRate
+ ((double) ZeroSizeBufferEntries - CompbufReservedSpaceZs) * 64
* AverageZeroSizeCompressionRate;
} else if (AverageDCCZeroSizeFraction > 0) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth
/ TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = dml_min(
(double) CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
(double) MetaFIFOSizeInKEntries * 1024 * 64
/ (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate
+ 1 / AverageDCCCompressionRate))
+ dml_min(((double) ROBBufferSizeInKByte * 1024 - CompbufReservedSpace64B * 64)
* AverageDCCCompressionRate,
((double) ZeroSizeBufferEntries - CompbufReservedSpaceZs) * 64
/ (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: min 1 = %f\n", __func__,
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 2 = %f\n", __func__, MetaFIFOSizeInKEntries * 1024 * 64 /
(AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate + 1 /
AverageDCCCompressionRate));
dml_print("DML::%s: min 3 = %f\n", __func__, (ROBBufferSizeInKByte * 1024 -
CompbufReservedSpace64B * 64) * AverageDCCCompressionRate);
dml_print("DML::%s: min 4 = %f\n", __func__, (ZeroSizeBufferEntries - CompbufReservedSpaceZs) * 64 /
(AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
#endif
} else {
EffectiveCompressedBufferSize = dml_min(
(double) CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
(double) MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate)
+ ((double) ROBBufferSizeInKByte * 1024 - CompbufReservedSpace64B * 64)
* AverageDCCCompressionRate;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: min 1 = %f\n", __func__,
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 2 = %f\n", __func__,
MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaFIFOSizeInKEntries = %d\n", __func__, MetaFIFOSizeInKEntries);
dml_print("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, AverageZeroSizeCompressionRate);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
#endif
*StutterPeriod = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) {
LinesInDETY = ((double) DETBufferSizeY[k]
+ (UnboundedRequestEnabled == true ? EffectiveCompressedBufferSize : 0)
* ReadBandwidthSurfaceLuma[k] / TotalDataReadBandwidth)
/ BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath = dml_floor(LinesInDETY, SwathHeightY[k]);
DETBufferingTimeY = LinesInDETYRoundedDownToSwath * ((double) HTotal[k] / PixelClock[k])
/ VRatio[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%0d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%0d, ReadBandwidthSurfaceLuma = %f\n",
__func__, k, ReadBandwidthSurfaceLuma[k]);
dml_print("DML::%s: k=%0d, TotalDataReadBandwidth = %f\n", __func__, k, TotalDataReadBandwidth);
dml_print("DML::%s: k=%0d, LinesInDETY = %f\n", __func__, k, LinesInDETY);
dml_print("DML::%s: k=%0d, LinesInDETYRoundedDownToSwath = %f\n",
__func__, k, LinesInDETYRoundedDownToSwath);
dml_print("DML::%s: k=%0d, HTotal = %d\n", __func__, k, HTotal[k]);
dml_print("DML::%s: k=%0d, PixelClock = %f\n", __func__, k, PixelClock[k]);
dml_print("DML::%s: k=%0d, VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%0d, DETBufferingTimeY = %f\n", __func__, k, DETBufferingTimeY);
dml_print("DML::%s: k=%0d, PixelClock = %f\n", __func__, k, PixelClock[k]);
#endif
if (!FoundCriticalSurface || DETBufferingTimeY < *StutterPeriod) {
bool isInterlaceTiming = Interlace[k] && !ProgressiveToInterlaceUnitInOPP;
FoundCriticalSurface = true;
*StutterPeriod = DETBufferingTimeY;
FrameTimeCriticalSurface = (
isInterlaceTiming ?
dml_floor((double) VTotal[k] / 2.0, 1.0) : VTotal[k])
* (double) HTotal[k] / PixelClock[k];
VActiveTimeCriticalSurface = (
isInterlaceTiming ?
dml_floor((double) VActive[k] / 2.0, 1.0) : VActive[k])
* (double) HTotal[k] / PixelClock[k];
BytePerPixelYCriticalSurface = BytePerPixelY[k];
SwathWidthYCriticalSurface = SwathWidthY[k];
SwathHeightYCriticalSurface = SwathHeightY[k];
BlockWidth256BytesYCriticalSurface = BlockWidth256BytesY[k];
LinesToFinishSwathTransferStutterCriticalSurface = SwathHeightY[k]
- (LinesInDETY - LinesInDETYRoundedDownToSwath);
DETBufferSizeYCriticalSurface = DETBufferSizeY[k];
MinTTUVBlankCriticalSurface = MinTTUVBlank[k];
doublePlaneCriticalSurface = (ReadBandwidthSurfaceChroma[k] == 0);
doublePipeCriticalSurface = (DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d, FoundCriticalSurface = %d\n",
__func__, k, FoundCriticalSurface);
dml_print("DML::%s: k=%0d, StutterPeriod = %f\n",
__func__, k, *StutterPeriod);
dml_print("DML::%s: k=%0d, MinTTUVBlankCriticalSurface = %f\n",
__func__, k, MinTTUVBlankCriticalSurface);
dml_print("DML::%s: k=%0d, FrameTimeCriticalSurface = %f\n",
__func__, k, FrameTimeCriticalSurface);
dml_print("DML::%s: k=%0d, VActiveTimeCriticalSurface = %f\n",
__func__, k, VActiveTimeCriticalSurface);
dml_print("DML::%s: k=%0d, BytePerPixelYCriticalSurface = %d\n",
__func__, k, BytePerPixelYCriticalSurface);
dml_print("DML::%s: k=%0d, SwathWidthYCriticalSurface = %f\n",
__func__, k, SwathWidthYCriticalSurface);
dml_print("DML::%s: k=%0d, SwathHeightYCriticalSurface = %f\n",
__func__, k, SwathHeightYCriticalSurface);
dml_print("DML::%s: k=%0d, BlockWidth256BytesYCriticalSurface = %d\n",
__func__, k, BlockWidth256BytesYCriticalSurface);
dml_print("DML::%s: k=%0d, doublePlaneCriticalSurface = %d\n",
__func__, k, doublePlaneCriticalSurface);
dml_print("DML::%s: k=%0d, doublePipeCriticalSurface = %d\n",
__func__, k, doublePipeCriticalSurface);
dml_print("DML::%s: k=%0d, LinesToFinishSwathTransferStutterCriticalSurface = %f\n",
__func__, k, LinesToFinishSwathTransferStutterCriticalSurface);
#endif
}
}
}
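/*
 * Stutter burst time: the part of one stutter period of data that fits in
 * the ROB/compressed buffer drains at ReturnBW (as compressed bytes), the
 * remainder is limited to 64 bytes per DCFCLK cycle, and row (meta/PTE)
 * traffic is added on top. The burst can never be shorter than the residual
 * swath transfer of the critical surface.
 */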
PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = dml_min(*StutterPeriod * TotalDataReadBandwidth,
EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ROBBufferSizeInKByte = %d\n", __func__, ROBBufferSizeInKByte);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n",
__func__, *StutterPeriod * TotalDataReadBandwidth);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
dml_print("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f\n", __func__,
PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer);
dml_print("DML::%s: ReturnBW = %f\n", __func__, ReturnBW);
dml_print("DML::%s: TotalDataReadBandwidth = %f\n", __func__, TotalDataReadBandwidth);
dml_print("DML::%s: TotalRowReadBandwidth = %f\n", __func__, TotalRowReadBandwidth);
dml_print("DML::%s: DCFCLK = %f\n", __func__, DCFCLK);
#endif
StutterBurstTime = PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / AverageDCCCompressionRate
/ ReturnBW
+ (*StutterPeriod * TotalDataReadBandwidth
- PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64)
+ *StutterPeriod * TotalRowReadBandwidth / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Part 1 = %f\n", __func__, PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer /
AverageDCCCompressionRate / ReturnBW);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n",
__func__, (*StutterPeriod * TotalDataReadBandwidth));
dml_print("DML::%s: Part 2 = %f\n", __func__, (*StutterPeriod * TotalDataReadBandwidth -
PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64));
dml_print("DML::%s: Part 3 = %f\n", __func__, *StutterPeriod * TotalRowReadBandwidth / ReturnBW);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
#endif
StutterBurstTime = dml_max(StutterBurstTime,
LinesToFinishSwathTransferStutterCriticalSurface * BytePerPixelYCriticalSurface
* SwathWidthYCriticalSurface / ReturnBW);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Time to finish residue swath=%f\n",
__func__,
LinesToFinishSwathTransferStutterCriticalSurface *
BytePerPixelYCriticalSurface * SwathWidthYCriticalSurface / ReturnBW);
#endif
TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (WritebackEnable[k])
TotalActiveWriteback = TotalActiveWriteback + 1;
}
if (TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SRExitTime = %f\n", __func__, SRExitTime);
dml_print("DML::%s: SRExitZ8Time = %f\n", __func__, SRExitZ8Time);
dml_print("DML::%s: StutterBurstTime = %f (final)\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
#endif
*StutterEfficiencyNotIncludingVBlank = dml_max(0.,
1 - (SRExitTime + StutterBurstTime) / *StutterPeriod) * 100;
*Z8StutterEfficiencyNotIncludingVBlank = dml_max(0.,
1 - (SRExitZ8Time + StutterBurstTime) / *StutterPeriod) * 100;
*NumberOfStutterBurstsPerFrame = (
*StutterEfficiencyNotIncludingVBlank > 0 ?
dml_ceil(VActiveTimeCriticalSurface / *StutterPeriod, 1) : 0);
*Z8NumberOfStutterBurstsPerFrame = (
*Z8StutterEfficiencyNotIncludingVBlank > 0 ?
dml_ceil(VActiveTimeCriticalSurface / *StutterPeriod, 1) : 0);
} else {
*StutterEfficiencyNotIncludingVBlank = 0.;
*Z8StutterEfficiencyNotIncludingVBlank = 0.;
*NumberOfStutterBurstsPerFrame = 0;
*Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, VActiveTimeCriticalSurface);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n",
__func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n",
__func__, *Z8StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: NumberOfStutterBurstsPerFrame = %d\n", __func__, *NumberOfStutterBurstsPerFrame);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) {
if (BlendingAndTiming[k] == k) {
if (TotalNumberOfActiveOTG == 0) {
doublePixelClock = PixelClock[k];
doubleHTotal = HTotal[k];
doubleVTotal = VTotal[k];
} else if (doublePixelClock != PixelClock[k] || doubleHTotal != HTotal[k]
|| doubleVTotal != VTotal[k]) {
SameTiming = false;
}
TotalNumberOfActiveOTG = TotalNumberOfActiveOTG + 1;
}
}
}
if (*StutterEfficiencyNotIncludingVBlank > 0) {
LastStutterPeriod = VActiveTimeCriticalSurface - (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizeTimingsFinal || TotalNumberOfActiveOTG == 1) && SameTiming
&& LastStutterPeriod + MinTTUVBlankCriticalSurface > StutterEnterPlusExitWatermark) {
*StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitTime
+ StutterBurstTime * VActiveTimeCriticalSurface
/ *StutterPeriod) / FrameTimeCriticalSurface) * 100;
} else {
*StutterEfficiency = *StutterEfficiencyNotIncludingVBlank;
}
} else {
*StutterEfficiency = 0;
}
if (*Z8StutterEfficiencyNotIncludingVBlank > 0) {
LastZ8StutterPeriod = VActiveTimeCriticalSurface
- (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizeTimingsFinal || TotalNumberOfActiveOTG == 1) && SameTiming && LastZ8StutterPeriod +
MinTTUVBlankCriticalSurface > Z8StutterEnterPlusExitWatermark) {
*Z8StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitZ8Time + StutterBurstTime
* VActiveTimeCriticalSurface / *StutterPeriod) / FrameTimeCriticalSurface) * 100;
} else {
*Z8StutterEfficiency = *Z8StutterEfficiencyNotIncludingVBlank;
}
} else {
*Z8StutterEfficiency = 0.;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, Z8StutterEnterPlusExitWatermark);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
dml_print("DML::%s: StutterEfficiency = %f\n", __func__, *StutterEfficiency);
dml_print("DML::%s: Z8StutterEfficiency = %f\n", __func__, *Z8StutterEfficiency);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n",
__func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
#endif
SwathSizeCriticalSurface = BytePerPixelYCriticalSurface * SwathHeightYCriticalSurface
* dml_ceil(SwathWidthYCriticalSurface, BlockWidth256BytesYCriticalSurface);
LastChunkOfSwathSize = SwathSizeCriticalSurface % (PixelChunkSizeInKByte * 1024);
MissingPartOfLastSwathOfDETSize = dml_ceil(DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface)
- DETBufferSizeYCriticalSurface;
*DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!UnboundedRequestEnabled && (NumberOfActiveSurfaces == 1)
&& doublePlaneCriticalSurface && doublePipeCriticalSurface && (LastChunkOfSwathSize > 0)
&& (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0)
&& (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SwathSizeCriticalSurface = %d\n", __func__, SwathSizeCriticalSurface);
dml_print("DML::%s: LastChunkOfSwathSize = %d\n", __func__, LastChunkOfSwathSize);
dml_print("DML::%s: MissingPartOfLastSwathOfDETSize = %d\n", __func__, MissingPartOfLastSwathOfDETSize);
dml_print("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %d\n", __func__, *DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
} // CalculateStutterEfficiency
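/*
 * Overview (annotation): split the return buffer between DET and the compressed buffer.
 * MaxTotalDETInKByte = ceil((ConfigReturnBufferSizeInKByte + ROBBufferSizeInKByte) * 4/5, 64),
 * nomDETInKByte = floor(MaxTotalDETInKByte / MaxNumDPP, 64), and the remainder of the
 * config return buffer (ConfigReturnBufferSizeInKByte - MaxTotalDETInKByte) is reported as
 * the minimum compressed buffer size. The DV override, when enabled, replaces only the
 * nominal per-pipe DET size.
 */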
void dml32_CalculateMaxDETAndMinCompressedBufferSize(
unsigned int ConfigReturnBufferSizeInKByte,
unsigned int ROBBufferSizeInKByte,
unsigned int MaxNumDPP,
bool nomDETInKByteOverrideEnable, // VBA_DELTA, allow DV to override default DET size
unsigned int nomDETInKByteOverrideValue, // VBA_DELTA
/* Output */
unsigned int *MaxTotalDETInKByte,
unsigned int *nomDETInKByte,
unsigned int *MinCompressedBufferSizeInKByte)
{
bool det_buff_size_override_en = nomDETInKByteOverrideEnable;
unsigned int det_buff_size_override_val = nomDETInKByteOverrideValue;
*MaxTotalDETInKByte = dml_ceil(((double)ConfigReturnBufferSizeInKByte +
(double) ROBBufferSizeInKByte) * 4.0 / 5.0, 64);
*nomDETInKByte = dml_floor((double) *MaxTotalDETInKByte / (double) MaxNumDPP, 64);
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %0d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: ROBBufferSizeInKByte = %0d\n", __func__, ROBBufferSizeInKByte);
dml_print("DML::%s: MaxNumDPP = %0d\n", __func__, MaxNumDPP);
dml_print("DML::%s: MaxTotalDETInKByte = %0d\n", __func__, *MaxTotalDETInKByte);
dml_print("DML::%s: nomDETInKByte = %0d\n", __func__, *nomDETInKByte);
dml_print("DML::%s: MinCompressedBufferSizeInKByte = %0d\n", __func__, *MinCompressedBufferSizeInKByte);
#endif
if (det_buff_size_override_en) {
*nomDETInKByte = det_buff_size_override_val;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: nomDETInKByte = %0d (override)\n", __func__, *nomDETInKByte);
#endif
}
} // CalculateMaxDETAndMinCompressedBufferSize
bool dml32_CalculateVActiveBandwithSupport(unsigned int NumberOfActiveSurfaces,
double ReturnBW,
bool NotUrgentLatencyHiding[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double cursor_bw[],
double meta_row_bandwidth[],
double dpte_row_bandwidth[],
unsigned int NumberOfDPP[],
double UrgentBurstFactorLuma[],
double UrgentBurstFactorChroma[],
double UrgentBurstFactorCursor[])
{
unsigned int k;
bool NotEnoughUrgentLatencyHiding = false;
bool CalculateVActiveBandwithSupport_val = false;
double VActiveBandwith = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (NotUrgentLatencyHiding[k]) {
NotEnoughUrgentLatencyHiding = true;
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
		VActiveBandwith = VActiveBandwith + ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k]
				+ ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k]
				+ cursor_bw[k] * UrgentBurstFactorCursor[k]
				+ NumberOfDPP[k] * meta_row_bandwidth[k]
				+ NumberOfDPP[k] * dpte_row_bandwidth[k];
}
CalculateVActiveBandwithSupport_val = (VActiveBandwith <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, NotEnoughUrgentLatencyHiding);
dml_print("DML::%s: VActiveBandwith = %f\n", __func__, VActiveBandwith);
dml_print("DML::%s: ReturnBW = %f\n", __func__, ReturnBW);
dml_print("DML::%s: CalculateVActiveBandwithSupport_val = %d\n", __func__, CalculateVActiveBandwithSupport_val);
#endif
return CalculateVActiveBandwithSupport_val;
}
void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,
double ReturnBW,
bool NotUrgentLatencyHiding[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
double cursor_bw[],
double meta_row_bandwidth[],
double dpte_row_bandwidth[],
double cursor_bw_pre[],
double prefetch_vmrow_bw[],
unsigned int NumberOfDPP[],
double UrgentBurstFactorLuma[],
double UrgentBurstFactorChroma[],
double UrgentBurstFactorCursor[],
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
double PrefetchBW[],
double VRatio[],
double MaxVRatioPre,
/* output */
double *MaxPrefetchBandwidth,
double *FractionOfUrgentBandwidth,
bool *PrefetchBandwidthSupport)
{
unsigned int k;
double ActiveBandwidthPerSurface;
bool NotEnoughUrgentLatencyHiding = false;
double TotalActiveBandwidth = 0;
double TotalPrefetchBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (NotUrgentLatencyHiding[k]) {
NotEnoughUrgentLatencyHiding = true;
}
}
*MaxPrefetchBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
		ActiveBandwidthPerSurface = ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k]
				+ ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k]
				+ cursor_bw[k] * UrgentBurstFactorCursor[k]
				+ NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]);
		TotalActiveBandwidth += ActiveBandwidthPerSurface;
		TotalPrefetchBandwidth = TotalPrefetchBandwidth + PrefetchBW[k] * VRatio[k];
		*MaxPrefetchBandwidth = *MaxPrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
				ActiveBandwidthPerSurface,
				NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k]
					+ PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k])
					+ cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
}
if (MaxVRatioPre == __DML_MAX_VRATIO_PRE__)
		*PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW)
				&& (TotalPrefetchBandwidth <= TotalActiveBandwidth * __DML_MAX_BW_RATIO_PRE__)
				&& !NotEnoughUrgentLatencyHiding;
else
*PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
*FractionOfUrgentBandwidth = *MaxPrefetchBandwidth / ReturnBW;
}
double dml32_CalculateBandwidthAvailableForImmediateFlip(unsigned int NumberOfActiveSurfaces,
double ReturnBW,
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
double cursor_bw[],
double cursor_bw_pre[],
unsigned int NumberOfDPP[],
double UrgentBurstFactorLuma[],
double UrgentBurstFactorChroma[],
double UrgentBurstFactorCursor[],
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[])
{
unsigned int k;
double CalculateBandwidthAvailableForImmediateFlip_val = ReturnBW;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
		CalculateBandwidthAvailableForImmediateFlip_val = CalculateBandwidthAvailableForImmediateFlip_val
				- dml_max(ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k]
						+ ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k]
						+ cursor_bw[k] * UrgentBurstFactorCursor[k],
					NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k]
						+ PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k])
						+ cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
}
return CalculateBandwidthAvailableForImmediateFlip_val;
}
void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurfaces,
double ReturnBW,
enum immediate_flip_requirement ImmediateFlipRequirement[],
double final_flip_bw[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
double cursor_bw[],
double meta_row_bandwidth[],
double dpte_row_bandwidth[],
double cursor_bw_pre[],
double prefetch_vmrow_bw[],
unsigned int NumberOfDPP[],
double UrgentBurstFactorLuma[],
double UrgentBurstFactorChroma[],
double UrgentBurstFactorCursor[],
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
/* output */
double *TotalBandwidth,
double *FractionOfUrgentBandwidth,
bool *ImmediateFlipBandwidthSupport)
{
unsigned int k;
*TotalBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (ImmediateFlipRequirement[k] != dm_immediate_flip_not_required) {
			*TotalBandwidth = *TotalBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
					NumberOfDPP[k] * final_flip_bw[k]
						+ ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k]
						+ ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k]
						+ cursor_bw[k] * UrgentBurstFactorCursor[k],
					NumberOfDPP[k] * (final_flip_bw[k]
						+ PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k]
						+ PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k])
						+ cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
		} else {
			*TotalBandwidth = *TotalBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
					NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k])
						+ ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k]
						+ ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k]
						+ cursor_bw[k] * UrgentBurstFactorCursor[k],
					NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k]
						+ PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k])
						+ cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
}
}
*ImmediateFlipBandwidthSupport = (*TotalBandwidth <= ReturnBW);
*FractionOfUrgentBandwidth = *TotalBandwidth / ReturnBW;
}
bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces,
double ReturnBW,
double UrgentLatency,
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int SwathWidthY[],
unsigned int SwathWidthC[],
double BytePerPixelInDETY[],
double BytePerPixelInDETC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int NumOfDPP[],
unsigned int HTotal[],
double PixelClock[],
double VRatioY[],
double VRatioC[],
enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
enum unbounded_requesting_policy UseUnboundedRequesting)
{
int k;
double SwathSizeAllSurfaces = 0;
double SwathSizeAllSurfacesInFetchTimeUs;
double DETSwathLatencyHidingUs;
double DETSwathLatencyHidingYUs;
double DETSwathLatencyHidingCUs;
double SwathSizePerSurfaceY[DC__NUM_DPP__MAX];
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
if (UseUnboundedRequesting == dm_unbounded_requesting)
return false;
/* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
if (SwathHeightC[k] != 0)
SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
else
SwathSizePerSurfaceC[k] = 0;
SwathSizeAllSurfaces += SwathSizePerSurfaceY[k] + SwathSizePerSurfaceC[k];
}
SwathSizeAllSurfacesInFetchTimeUs = SwathSizeAllSurfaces / ReturnBW + UrgentLatency;
	/* ensure the DET contents minus one swath can hide a single-swath fetch for all surfaces */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
double LineTime = HTotal[k] / PixelClock[k];
/* only care if surface is not phantom */
if (UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) {
			DETSwathLatencyHidingYUs = (dml_floor(DETBufferSizeY[k] / BytePerPixelInDETY[k]
					/ SwathWidthY[k], 1.0) - SwathHeightY[k]) / VRatioY[k] * LineTime;
			if (SwathHeightC[k] != 0) {
				DETSwathLatencyHidingCUs = (dml_floor(DETBufferSizeC[k] / BytePerPixelInDETC[k]
						/ SwathWidthC[k], 1.0) - SwathHeightC[k]) / VRatioC[k] * LineTime;
DETSwathLatencyHidingUs = dml_min(DETSwathLatencyHidingYUs, DETSwathLatencyHidingCUs);
} else {
DETSwathLatencyHidingUs = DETSwathLatencyHidingYUs;
}
/* DET must be able to hide time to fetch 1 swath for each surface */
if (DETSwathLatencyHidingUs < SwathSizeAllSurfacesInFetchTimeUs) {
NotEnoughDETSwathFillLatencyHiding = true;
break;
}
}
}
return NotEnoughDETSwathFillLatencyHiding;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_32.h"
#include "../dml_inline_defs.h"
#include "display_mode_vba_util_32.h"
void dml32_recalculate(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib);
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
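/*
 * dml32_recalculate (annotation): re-runs mode support/system configuration, re-derives
 * the DET split and minimum compressed buffer size, applies the progressive-to-interlace
 * pixel clock adjustment, and then performs the full clock/prefetch/watermark calculation
 * below.
 */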
void dml32_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
dml32_CalculateMaxDETAndMinCompressedBufferSize(mode_lib->vba.ConfigReturnBufferSizeInKByte,
mode_lib->vba.ROBBufferSizeInKByte,
DC__NUM_DPP,
false, //mode_lib->vba.override_setting.nomDETInKByteOverrideEnable,
0, //mode_lib->vba.override_setting.nomDETInKByteOverrideValue,
/* Output */
&mode_lib->vba.MaxTotalDETInKByte, &mode_lib->vba.nomDETInKByte,
&mode_lib->vba.MinCompressedBufferSizeInKByte);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Calling DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation\n", __func__);
#endif
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
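/*
 * Annotation: this single pass derives DISPCLK/DPPCLK/DSCCLK and DCFCLK deep sleep, sizes
 * swaths and DET, computes VM row/swath parameters and urgent/extra latencies, and then
 * iterates on VStartup to find a prefetch schedule (and, when requested, immediate-flip
 * support) that fits within the available return bandwidth.
 */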
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int j, k;
bool ImmediateFlipRequirementFinal;
int iteration;
double MaxTotalRDBandwidth;
unsigned int NextPrefetchMode;
double MaxTotalRDBandwidthNoUrgentBurst = 0.0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThanMax = false;
double TWait;
double TotalWRBandwidth = 0;
double WRBandwidth = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: --- START ---\n", __func__);
dml_print("DML::%s: mode_lib->vba.PrefetchMode = %d\n", __func__, mode_lib->vba.PrefetchMode);
dml_print("DML::%s: mode_lib->vba.ImmediateFlipSupport = %d\n", __func__, mode_lib->vba.ImmediateFlipSupport);
dml_print("DML::%s: mode_lib->vba.VoltageLevel = %d\n", __func__, mode_lib->vba.VoltageLevel);
#endif
v->WritebackDISPCLK = 0.0;
v->GlobalDPPCLK = 0.0;
// DISPCLK and DPPCLK Calculation
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.WritebackEnable[k]) {
v->WritebackDISPCLK = dml_max(v->WritebackDISPCLK,
dml32_CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k], mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackHTaps[k],
mode_lib->vba.WritebackVTaps[k],
mode_lib->vba.WritebackSourceWidth[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k], mode_lib->vba.WritebackLineBufferSize,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed));
}
}
v->DISPCLK_calculated = v->WritebackDISPCLK;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
v->DISPCLK_calculated = dml_max(v->DISPCLK_calculated,
dml32_CalculateRequiredDispclk(
mode_lib->vba.ODMCombineEnabled[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
mode_lib->vba.MaxDppclk[v->soc.num_states - 1]));
}
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(mode_lib->vba.HRatio[k],
mode_lib->vba.HRatioChroma[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.VRatioChroma[k],
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput,
mode_lib->vba.PixelClock[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.htaps[k],
mode_lib->vba.HTAPsChroma[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.VTAPsChroma[k],
/* Output */
&v->PSCL_THROUGHPUT_LUMA[k], &v->PSCL_THROUGHPUT_CHROMA[k],
&v->DPPCLKUsingSingleDPP[k]);
}
dml32_CalculateDPPCLK(mode_lib->vba.NumberOfActiveSurfaces, mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed, v->DPPCLKUsingSingleDPP, mode_lib->vba.DPPPerPlane,
/* Output */
&v->GlobalDPPCLK, v->DPPCLK);
for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
v->DPPCLK_calculated[k] = v->DPPCLK[k];
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateBytePerPixelAndBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
/* Output */
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelDETY[k],
&v->BytePerPixelDETC[k],
&v->BlockHeight256BytesY[k],
&v->BlockHeight256BytesC[k],
&v->BlockWidth256BytesY[k],
&v->BlockWidth256BytesC[k],
&v->BlockHeightY[k],
&v->BlockHeightC[k],
&v->BlockWidthY[k],
&v->BlockWidthC[k]);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: %d\n", __func__, __LINE__);
#endif
dml32_CalculateSwathWidth(
false, // ForceSingleDPP
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.SourcePixelFormat,
mode_lib->vba.SourceRotation,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
mode_lib->vba.ODMCombineEnabled,
v->BytePerPixelY,
v->BytePerPixelC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
mode_lib->vba.BlendingAndTiming,
mode_lib->vba.HActive,
mode_lib->vba.HRatio,
mode_lib->vba.DPPPerPlane,
/* Output */
v->SwathWidthSingleDPPY, v->SwathWidthSingleDPPC, v->SwathWidthY, v->SwathWidthC,
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_integer_array[0], // Integer MaximumSwathHeightY[]
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_integer_array[1], // Integer MaximumSwathHeightC[]
v->swath_width_luma_ub, v->swath_width_chroma_ub);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->ReadBandwidthSurfaceLuma[k] = v->SwathWidthSingleDPPY[k] * v->BytePerPixelY[k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
v->ReadBandwidthSurfaceChroma[k] = v->SwathWidthSingleDPPC[k] * v->BytePerPixelC[k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatioChroma[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n",
__func__, k, v->ReadBandwidthSurfaceLuma[k]);
dml_print("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n",
__func__, k, v->ReadBandwidthSurfaceChroma[k]);
#endif
}
{
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
mode_lib->vba.MaxTotalDETInKByte,
mode_lib->vba.MinCompressedBufferSizeInKByte,
false, /* ForceSingleDPP */
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.nomDETInKByte,
mode_lib->vba.UseUnboundedRequesting,
mode_lib->vba.DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment,
mode_lib->vba.ip.pixel_chunk_size_kbytes,
mode_lib->vba.ip.rob_buffer_size_kbytes,
mode_lib->vba.CompressedBufferSegmentSizeInkByteFinal,
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_output_encoder_array, /* output_encoder_class Output[] */
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_single_array[0], /* Single MaximumSwathWidthLuma[] */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_single_array[1], /* Single MaximumSwathWidthChroma[] */
mode_lib->vba.SourceRotation,
mode_lib->vba.ViewportStationary,
mode_lib->vba.SourcePixelFormat,
mode_lib->vba.SurfaceTiling,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
mode_lib->vba.ODMCombineEnabled,
mode_lib->vba.BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
mode_lib->vba.HActive,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.DPPPerPlane,
/* Output */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_long_array[0], /* Long swath_width_luma_ub[] */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_long_array[1], /* Long swath_width_chroma_ub[] */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_double_array[0], /* Long SwathWidth[] */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_double_array[1], /* Long SwathWidthChroma[] */
mode_lib->vba.SwathHeightY,
mode_lib->vba.SwathHeightC,
mode_lib->vba.DETBufferSizeInKByte,
mode_lib->vba.DETBufferSizeY,
mode_lib->vba.DETBufferSizeC,
&v->UnboundedRequestEnabled,
&v->CompressedBufferSizeInkByte,
&v->CompBufReservedSpaceKBytes,
&v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_boolean, /* bool *CompBufReservedSpaceNeedAjustment */
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_boolean_array, /* bool ViewportSizeSupportPerSurface[] */
&v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_boolean); /* bool *ViewportSizeSupport */
}
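	// Express the compressed-buffer reserved space in 256-byte (Zs) and 64-byte units.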
v->CompBufReservedSpaceZs = v->CompBufReservedSpaceKBytes * 1024.0 / 256.0;
v->CompBufReservedSpace64B = v->CompBufReservedSpaceKBytes * 1024.0 / 64.0;
// DCFCLK Deep Sleep
dml32_CalculateDCFCLKDeepSleep(
mode_lib->vba.NumberOfActiveSurfaces,
v->BytePerPixelY,
v->BytePerPixelC,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
v->SwathWidthY,
v->SwathWidthC,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
mode_lib->vba.DPPCLK,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
mode_lib->vba.ReturnBusWidth,
/* Output */
&v->DCFCLKDeepSleep);
// DSCCLK
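	// DSCCLK is derived from the back-end pixel clock: divide by 3 (6 for 2:1 ODM,
	// 12 for 4:1 ODM), by the format factor (2 for 4:2:0 and n422, else 1), and by
	// the DSCCLK down-spread margin.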
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
v->DSCCLK_calculated[k] = 0.0;
} else {
if (mode_lib->vba.OutputFormat[k] == dm_420)
mode_lib->vba.DSCFormatFactor = 2;
else if (mode_lib->vba.OutputFormat[k] == dm_444)
mode_lib->vba.DSCFormatFactor = 1;
else if (mode_lib->vba.OutputFormat[k] == dm_n422)
mode_lib->vba.DSCFormatFactor = 2;
else
mode_lib->vba.DSCFormatFactor = 1;
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_4to1)
v->DSCCLK_calculated[k] = mode_lib->vba.PixelClockBackEnd[k] / 12
/ mode_lib->vba.DSCFormatFactor
/ (1 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
v->DSCCLK_calculated[k] = mode_lib->vba.PixelClockBackEnd[k] / 6
/ mode_lib->vba.DSCFormatFactor
/ (1 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else
v->DSCCLK_calculated[k] = mode_lib->vba.PixelClockBackEnd[k] / 3
/ mode_lib->vba.DSCFormatFactor
/ (1 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
}
// DSC Delay
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->DSCDelay[k] = dml32_DSCDelayRequirement(mode_lib->vba.DSCEnabled[k],
mode_lib->vba.ODMCombineEnabled[k], mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k],
mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k],
mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k], mode_lib->vba.PixelClock[k],
mode_lib->vba.PixelClockBackEnd[k], mode_lib->vba.ip.dsc_delay_factor_wa);
}
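	// Propagate the DSC delay computed on the timing master to planes that share the same
	// BlendingAndTiming index when DSC is enabled on that master.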
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) // NumberOfSurfaces
if (j != k && mode_lib->vba.BlendingAndTiming[k] == j && mode_lib->vba.DSCEnabled[j])
v->DSCDelay[k] = v->DSCDelay[j];
//Immediate Flip
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->ImmediateFlipSupportedSurface[k] = mode_lib->vba.ImmediateFlipSupport
&& (mode_lib->vba.ImmediateFlipRequirement[k] != dm_immediate_flip_not_required);
}
// Prefetch
dml32_CalculateSurfaceSizeInMall(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
v->BytePerPixelY,
mode_lib->vba.ViewportWidthChroma,
mode_lib->vba.ViewportHeightChroma,
v->BytePerPixelC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidthY,
v->BlockWidthC,
v->BlockHeightY,
v->BlockHeightC,
mode_lib->vba.DCCMetaPitchY,
mode_lib->vba.DCCMetaPitchC,
/* Output */
v->SurfaceSizeInMALL,
&v->dummy_vars.
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.dummy_boolean2); /* Boolean *ExceededMALLSize */
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].PixelClock = mode_lib->vba.PixelClock[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].DPPPerSurface = mode_lib->vba.DPPPerPlane[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].SourceRotation = mode_lib->vba.SourceRotation[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportHeight = mode_lib->vba.ViewportHeight[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportHeightChroma = mode_lib->vba.ViewportHeightChroma[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockWidth256BytesY = v->BlockWidth256BytesY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockHeight256BytesY = v->BlockHeight256BytesY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockWidth256BytesC = v->BlockWidth256BytesC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockHeight256BytesC = v->BlockHeight256BytesC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockWidthY = v->BlockWidthY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockHeightY = v->BlockHeightY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockWidthC = v->BlockWidthC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BlockHeightC = v->BlockHeightC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].SourcePixelFormat = mode_lib->vba.SourcePixelFormat[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].SurfaceTiling = mode_lib->vba.SurfaceTiling[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].VRatio = mode_lib->vba.VRatio[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].VRatioChroma = mode_lib->vba.VRatioChroma[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].VTaps = mode_lib->vba.vtaps[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].VTapsChroma = mode_lib->vba.VTAPsChroma[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].PitchY = mode_lib->vba.PitchY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].DCCMetaPitchY = mode_lib->vba.DCCMetaPitchY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].PitchC = mode_lib->vba.PitchC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].DCCMetaPitchC = mode_lib->vba.DCCMetaPitchC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportStationary = mode_lib->vba.ViewportStationary[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportXStart = mode_lib->vba.ViewportXStartY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportYStart = mode_lib->vba.ViewportYStartY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportXStartC = mode_lib->vba.ViewportXStartC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].ViewportYStartC = mode_lib->vba.ViewportYStartC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].FORCE_ONE_ROW_FOR_FRAME = mode_lib->vba.ForceOneRowForFrame[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].SwathHeightY = mode_lib->vba.SwathHeightY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters[k].SwathHeightC = mode_lib->vba.SwathHeightC[k];
}
{
dml32_CalculateVMRowAndSwath(
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PTEBufferSizeInRequestsChroma,
mode_lib->vba.DCCMetaBufferSizeBytes,
mode_lib->vba.UseMALLForStaticScreen,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.MALLAllocatedForDCNFinal,
v->SwathWidthY,
v->SwathWidthC,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMMinPageSizeKBytes,
mode_lib->vba.HostVMMinPageSize,
/* Output */
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_boolean_array2[0], // Boolean PTEBufferSizeNotExceeded[]
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_boolean_array2[1], // Boolean DCCMetaBufferSizeNotExceeded[]
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->dpte_row_height,
v->dpte_row_height_chroma,
v->dpte_row_height_linear,
v->dpte_row_height_linear_chroma,
v->meta_req_width,
v->meta_req_width_chroma,
v->meta_req_height,
v->meta_req_height_chroma,
v->meta_row_width,
v->meta_row_width_chroma,
v->meta_row_height,
v->meta_row_height_chroma,
v->vm_group_bytes,
v->dpte_group_bytes,
v->PixelPTEReqWidthY,
v->PixelPTEReqHeightY,
v->PTERequestSizeY,
v->PixelPTEReqWidthC,
v->PixelPTEReqHeightC,
v->PTERequestSizeC,
v->dpde0_bytes_per_frame_ub_l,
v->meta_pte_bytes_per_frame_ub_l,
v->dpde0_bytes_per_frame_ub_c,
v->meta_pte_bytes_per_frame_ub_c,
v->PrefetchSourceLinesY,
v->PrefetchSourceLinesC,
v->VInitPreFillY, v->VInitPreFillC,
v->MaxNumSwathY,
v->MaxNumSwathC,
v->meta_row_bw,
v->dpte_row_bw,
v->PixelPTEBytesPerRow,
v->PDEAndMetaPTEBytesFrame,
v->MetaRowByte,
v->Use_One_Row_For_Frame,
v->Use_One_Row_For_Frame_Flip,
v->UsesMALLForStaticScreen,
v->PTE_BUFFER_MODE,
v->BIGK_FRAGMENT_SIZE);
}
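	// Reorder buffer allocation: worst-case out-of-order return bytes per channel times the
	// number of memory channels; this feeds into the urgent extra latency below.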
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.ReorderBytes = mode_lib->vba.NumberOfChannels
* dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.VMDataOnlyReturnBW = dml32_get_return_bw_mbps_vm_only(
&mode_lib->vba.soc,
mode_lib->vba.VoltageLevel,
mode_lib->vba.DCFCLK,
mode_lib->vba.FabricClock,
mode_lib->vba.DRAMSpeed);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: mode_lib->vba.ReturnBusWidth = %f\n", __func__, mode_lib->vba.ReturnBusWidth);
dml_print("DML::%s: mode_lib->vba.DCFCLK = %f\n", __func__, mode_lib->vba.DCFCLK);
dml_print("DML::%s: mode_lib->vba.FabricClock = %f\n", __func__, mode_lib->vba.FabricClock);
dml_print("DML::%s: mode_lib->vba.FabricDatapathToDCNDataReturn = %f\n", __func__,
mode_lib->vba.FabricDatapathToDCNDataReturn);
dml_print("DML::%s: mode_lib->vba.PercentOfIdealSDPPortBWReceivedAfterUrgLatency = %f\n",
__func__, mode_lib->vba.PercentOfIdealSDPPortBWReceivedAfterUrgLatency);
dml_print("DML::%s: mode_lib->vba.DRAMSpeed = %f\n", __func__, mode_lib->vba.DRAMSpeed);
dml_print("DML::%s: mode_lib->vba.NumberOfChannels = %f\n", __func__, mode_lib->vba.NumberOfChannels);
dml_print("DML::%s: mode_lib->vba.DRAMChannelWidth = %f\n", __func__, mode_lib->vba.DRAMChannelWidth);
dml_print("DML::%s: mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly = %f\n",
__func__, mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly);
dml_print("DML::%s: VMDataOnlyReturnBW = %f\n", __func__, VMDataOnlyReturnBW);
dml_print("DML::%s: ReturnBW = %f\n", __func__, mode_lib->vba.ReturnBW);
#endif
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor = 1.0;
if (mode_lib->vba.GPUVMEnable && mode_lib->vba.HostVMEnable)
v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.HostVMInefficiencyFactor =
mode_lib->vba.ReturnBW / v->dummy_vars
.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
.VMDataOnlyReturnBW;
mode_lib->vba.TotalDCCActiveDPP = 0;
mode_lib->vba.TotalActiveDPP = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + mode_lib->vba.DPPPerPlane[k];
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
}
v->UrgentExtraLatency = dml32_CalculateExtraLatency(
mode_lib->vba.RoundTripPingLatencyCycles,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.ReorderBytes,
mode_lib->vba.DCFCLK,
mode_lib->vba.TotalActiveDPP,
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.TotalDCCActiveDPP,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.ReturnBW,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.DPPPerPlane,
v->dpte_group_bytes,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
mode_lib->vba.HostVMMinPageSize,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels);
mode_lib->vba.TCalc = 24.0 / v->DCFCLKDeepSleep;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
v->WritebackDelay[mode_lib->vba.VoltageLevel][k] = mode_lib->vba.WritebackLatency
+ dml32_CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.WritebackDestinationHeight[k],
mode_lib->vba.WritebackSourceHeight[k],
mode_lib->vba.HTotal[k]) / mode_lib->vba.DISPCLK;
} else
v->WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.BlendingAndTiming[j] == k &&
mode_lib->vba.WritebackEnable[j] == true) {
v->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
dml_max(v->WritebackDelay[mode_lib->vba.VoltageLevel][k],
mode_lib->vba.WritebackLatency +
dml32_CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[j],
mode_lib->vba.WritebackHRatio[j],
mode_lib->vba.WritebackVRatio[j],
mode_lib->vba.WritebackVTaps[j],
mode_lib->vba.WritebackDestinationWidth[j],
mode_lib->vba.WritebackDestinationHeight[j],
mode_lib->vba.WritebackSourceHeight[j],
mode_lib->vba.HTotal[k]) / mode_lib->vba.DISPCLK);
}
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j)
v->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
v->WritebackDelay[mode_lib->vba.VoltageLevel][j];
v->UrgentLatency = dml32_CalculateUrgentLatency(mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.UrgentLatencyPixelMixedWithVMData,
mode_lib->vba.UrgentLatencyVMDataOnly,
mode_lib->vba.DoUrgentLatencyAdjustment,
mode_lib->vba.UrgentLatencyAdjustmentFabricClockComponent,
mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference,
mode_lib->vba.FabricClock);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateUrgentBurstFactor(mode_lib->vba.UsesMALLForPStateChange[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
v->UrgentLatency,
mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0],
mode_lib->vba.CursorBPP[k][0],
mode_lib->vba.VRatio[k],
mode_lib->vba.VRatioChroma[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
mode_lib->vba.DETBufferSizeY[k],
mode_lib->vba.DETBufferSizeC[k],
/* output */
&v->UrgBurstFactorCursor[k],
&v->UrgBurstFactorLuma[k],
&v->UrgBurstFactorChroma[k],
&v->NoUrgentLatencyHiding[k]);
v->cursor_bw[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / 8 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
}
v->NotEnoughDETSwathFillLatencyHiding = dml32_CalculateDETSwathFillLatencyHiding(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
v->UrgentLatency,
mode_lib->vba.SwathHeightY,
mode_lib->vba.SwathHeightC,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
mode_lib->vba.DETBufferSizeY,
mode_lib->vba.DETBufferSizeC,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.HTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.UseUnboundedRequesting);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&
!mode_lib->vba.ProgressiveToInterlaceUnitInOPP) ?
dml_floor((mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]) / 2.0, 1.0) :
mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]) - dml_max(1.0,
dml_ceil((double) v->WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
// Clamp to max OTG vstartup register limit
if (v->MaxVStartupLines[k] > 1023)
v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d VoltageLevel = %d\n", __func__, k, mode_lib->vba.VoltageLevel);
dml_print("DML::%s: k=%d WritebackDelay = %f\n", __func__,
k, v->WritebackDelay[mode_lib->vba.VoltageLevel][k]);
#endif
}
v->MaximumMaxVStartupLines = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
v->MaximumMaxVStartupLines = dml_max(v->MaximumMaxVStartupLines, v->MaxVStartupLines[k]);
ImmediateFlipRequirementFinal = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
ImmediateFlipRequirementFinal = ImmediateFlipRequirementFinal
|| (mode_lib->vba.ImmediateFlipRequirement[k] == dm_immediate_flip_required);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ImmediateFlipRequirementFinal = %d\n", __func__, ImmediateFlipRequirementFinal);
#endif
	// ModeProgramming will not repeat the schedule calculation using different prefetch modes;
	// it is just calculated once with the given prefetch mode
dml32_CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowForPStateChangeOrStutterInVBlankFinal,
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
v->VStartupLines = __DML_VBA_MIN_VSTARTUP__;
iteration = 0;
MaxTotalRDBandwidth = 0;
NextPrefetchMode = mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
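	// Prefetch schedule iteration: each pass recomputes the per-surface prefetch schedule,
	// urgent burst factors and bandwidth support checks for the current VStartupLines and
	// prefetch mode.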
do {
MaxTotalRDBandwidth = 0;
DestinationLineTimesForPrefetchLessThan2 = false;
VRatioPrefetchMoreThanMax = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines);
#endif
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
			/* NOTE: The PrefetchMode variable is invalid in DAL as per the input received.
* Hence the direction is to use PrefetchModePerState.
*/
TWait = dml32_CalculateTWait(
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.UsesMALLForPStateChange[k],
mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
mode_lib->vba.DRRDisplay[k],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.DCFClkDeepSleep = v->DCFCLKDeepSleep;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.DPPPerSurface = mode_lib->vba.DPPPerPlane[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.SourceRotation = mode_lib->vba.SourceRotation[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BlockWidth256BytesC = v->BlockWidth256BytesC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BlockHeight256BytesC = v->BlockHeight256BytesC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.HActive = mode_lib->vba.HActive[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.DCCEnable = mode_lib->vba.DCCEnable[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ODMMode = mode_lib->vba.ODMCombineEnabled[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.SourcePixelFormat = mode_lib->vba.SourcePixelFormat[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
v,
k,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
v->DSCDelay[k],
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->PrefetchSourceLinesY[k],
v->SwathWidthY[k],
v->VInitPreFillY[k],
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
&v->DestinationLinesToRequestRowInVBlank[k],
&v->VRatioPrefetchY[k],
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k], &v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->TSetup[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Prefetch calculation errResult=%0d\n",
__func__, k, mode_lib->vba.ErrorResult[k]);
#endif
v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateUrgentBurstFactor(mode_lib->vba.UsesMALLForPStateChange[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
v->UrgentLatency,
mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0],
mode_lib->vba.CursorBPP[k][0],
v->VRatioPrefetchY[k],
v->VRatioPrefetchC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
mode_lib->vba.DETBufferSizeY[k],
mode_lib->vba.DETBufferSizeC[k],
/* Output */
&v->UrgBurstFactorCursorPre[k],
&v->UrgBurstFactorLumaPre[k],
&v->UrgBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPrefetchY[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface=%d\n", __func__, k, mode_lib->vba.DPPPerPlane[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLuma=%f\n", __func__, k, v->UrgBurstFactorLuma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChroma=%f\n", __func__, k, v->UrgBurstFactorChroma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLumaPre=%f\n", __func__, k,
v->UrgBurstFactorLumaPre[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChromaPre=%f\n", __func__, k,
v->UrgBurstFactorChromaPre[k]);
dml_print("DML::%s: k=%0d VRatioPrefetchY=%f\n", __func__, k, v->VRatioPrefetchY[k]);
dml_print("DML::%s: k=%0d VRatioY=%f\n", __func__, k, mode_lib->vba.VRatio[k]);
dml_print("DML::%s: k=%0d prefetch_vmrow_bw=%f\n", __func__, k, v->prefetch_vmrow_bw[k]);
dml_print("DML::%s: k=%0d ReadBandwidthSurfaceLuma=%f\n", __func__, k,
v->ReadBandwidthSurfaceLuma[k]);
dml_print("DML::%s: k=%0d ReadBandwidthSurfaceChroma=%f\n", __func__, k,
v->ReadBandwidthSurfaceChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw=%f\n", __func__, k, v->cursor_bw[k]);
dml_print("DML::%s: k=%0d meta_row_bw=%f\n", __func__, k, v->meta_row_bw[k]);
dml_print("DML::%s: k=%0d dpte_row_bw=%f\n", __func__, k, v->dpte_row_bw[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWLuma=%f\n", __func__, k,
v->RequiredPrefetchPixDataBWLuma[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWChroma=%f\n", __func__, k,
v->RequiredPrefetchPixDataBWChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw_pre=%f\n", __func__, k, v->cursor_bw_pre[k]);
dml_print("DML::%s: k=%0d MaxTotalRDBandwidthNoUrgentBurst=%f\n", __func__, k,
MaxTotalRDBandwidthNoUrgentBurst);
#endif
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (v->VRatioPrefetchY[k] > v->MaxVRatioPre
|| v->VRatioPrefetchC[k] > v->MaxVRatioPre)
VRatioPrefetchMoreThanMax = true;
//bool DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = false;
//bool DestinationLinesToRequestRowInVBlankEqualOrMoreThan16 = false;
//if (v->DestinationLinesToRequestVMInVBlank[k] >= 32) {
// DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = true;
//}
//if (v->DestinationLinesToRequestRowInVBlank[k] >= 16) {
// DestinationLinesToRequestRowInVBlankEqualOrMoreThan16 = true;
//}
}
v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / mode_lib->vba.ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MaxTotalRDBandwidthNoUrgentBurst=%f\n",
__func__, MaxTotalRDBandwidthNoUrgentBurst);
dml_print("DML::%s: ReturnBW=%f\n", __func__, mode_lib->vba.ReturnBW);
dml_print("DML::%s: FractionOfUrgentBandwidth=%f\n",
__func__, mode_lib->vba.FractionOfUrgentBandwidth);
#endif
{
dml32_CalculatePrefetchBandwithSupport(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
v->NoUrgentLatencyHidingPre,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->RequiredPrefetchPixDataBWLuma,
v->RequiredPrefetchPixDataBWChroma,
v->cursor_bw,
v->meta_row_bw,
v->dpte_row_bw,
v->cursor_bw_pre,
v->prefetch_vmrow_bw,
mode_lib->vba.DPPPerPlane,
v->UrgBurstFactorLuma,
v->UrgBurstFactorChroma,
v->UrgBurstFactorCursor,
v->UrgBurstFactorLumaPre,
v->UrgBurstFactorChromaPre,
v->UrgBurstFactorCursorPre,
v->PrefetchBandwidth,
v->VRatio,
v->MaxVRatioPre,
/* output */
&MaxTotalRDBandwidth,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0],
&v->PrefetchModeSupported);
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector[k] = 1.0;
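		// Repeat the bandwidth calculation with all urgent burst factors forced to 1.0 so
		// that FractionOfUrgentBandwidth reflects the raw (non-burst-adjusted) demand.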
{
dml32_CalculatePrefetchBandwithSupport(mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
v->NoUrgentLatencyHidingPre,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->RequiredPrefetchPixDataBWLuma,
v->RequiredPrefetchPixDataBWChroma,
v->cursor_bw,
v->meta_row_bw,
v->dpte_row_bw,
v->cursor_bw_pre,
v->prefetch_vmrow_bw,
mode_lib->vba.DPPPerPlane,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->PrefetchBandwidth,
v->VRatio,
v->MaxVRatioPre,
/* output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0],
&v->FractionOfUrgentBandwidth,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_boolean);
}
if (VRatioPrefetchMoreThanMax != false || DestinationLineTimesForPrefetchLessThan2 != false) {
v->PrefetchModeSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (v->ErrorResult[k] == true || v->NotEnoughTimeForDynamicMetadata[k]) {
v->PrefetchModeSupported = false;
}
}
if (v->PrefetchModeSupported == true && mode_lib->vba.ImmediateFlipSupport == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = dml32_CalculateBandwidthAvailableForImmediateFlip(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->RequiredPrefetchPixDataBWLuma,
v->RequiredPrefetchPixDataBWChroma,
v->cursor_bw,
v->cursor_bw_pre,
mode_lib->vba.DPPPerPlane,
v->UrgBurstFactorLuma,
v->UrgBurstFactorChroma,
v->UrgBurstFactorCursor,
v->UrgBurstFactorLumaPre,
v->UrgBurstFactorChromaPre,
v->UrgBurstFactorCursorPre);
mode_lib->vba.TotImmediateFlipBytes = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.ImmediateFlipRequirement[k] != dm_immediate_flip_not_required) {
mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
+ mode_lib->vba.DPPPerPlane[k]
* (v->PDEAndMetaPTEBytesFrame[k]
+ v->MetaRowByte[k]);
if (v->use_one_row_for_frame_flip[k][0][0]) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ 2 * v->PixelPTEBytesPerRow[k];
} else {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ v->PixelPTEBytesPerRow[k];
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateFlipSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.VRatioChroma[k],
v->Tno_bw[k],
mode_lib->vba.DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
v->Use_One_Row_For_Frame_Flip[k],
/* Output */
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
}
{
dml32_CalculateImmediateFlipBandwithSupport(mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
mode_lib->vba.ImmediateFlipRequirement,
v->final_flip_bw,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->RequiredPrefetchPixDataBWLuma,
v->RequiredPrefetchPixDataBWChroma,
v->cursor_bw,
v->meta_row_bw,
v->dpte_row_bw,
v->cursor_bw_pre,
v->prefetch_vmrow_bw,
mode_lib->vba.DPPPerPlane,
v->UrgBurstFactorLuma,
v->UrgBurstFactorChroma,
v->UrgBurstFactorCursor,
v->UrgBurstFactorLumaPre,
v->UrgBurstFactorChromaPre,
v->UrgBurstFactorCursorPre,
/* output */
&v->total_dcn_read_bw_with_flip, // Single *TotalBandwidth
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0], // Single *FractionOfUrgentBandwidth
&v->ImmediateFlipSupported); // Boolean *ImmediateFlipBandwidthSupport
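/* Second pass of the immediate flip bandwidth calculation with unit burst factors:
* only FractionOfUrgentBandwidthImmediateFlip is consumed from this call, the total
* bandwidth and the support flag go to dummy variables.
*/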
dml32_CalculateImmediateFlipBandwithSupport(mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBW,
mode_lib->vba.ImmediateFlipRequirement,
v->final_flip_bw,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->RequiredPrefetchPixDataBWLuma,
v->RequiredPrefetchPixDataBWChroma,
v->cursor_bw,
v->meta_row_bw,
v->dpte_row_bw,
v->cursor_bw_pre,
v->prefetch_vmrow_bw,
mode_lib->vba.DPPPerPlane,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
/* output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[1], // Single *TotalBandwidth
&v->FractionOfUrgentBandwidthImmediateFlip, // Single *FractionOfUrgentBandwidth
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_boolean); // Boolean *ImmediateFlipBandwidthSupport
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.ImmediateFlipRequirement[k] != dm_immediate_flip_not_required && v->ImmediateFlipSupportedForPipe[k] == false) {
v->ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Pipe %0d not supporting iflip\n", __func__, k);
#endif
}
}
} else {
v->ImmediateFlipSupported = false;
}
/* Consider flip support okay if the flip bandwidth is supported, or when the user doesn't require an iflip and there is no host VM */
v->PrefetchAndImmediateFlipSupported = (v->PrefetchModeSupported == true &&
((!mode_lib->vba.ImmediateFlipSupport && !mode_lib->vba.HostVMEnable && !ImmediateFlipRequirementFinal) ||
v->ImmediateFlipSupported)) ? true : false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PrefetchModeSupported = %d\n", __func__, locals->PrefetchModeSupported);
for (uint k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
dml_print("DML::%s: ImmediateFlipRequirement[%d] = %d\n", __func__, k, mode_lib->vba.ImmediateFlipRequirement[k] == dm_immediate_flip_required);
dml_print("DML::%s: ImmediateFlipSupported = %d\n", __func__, locals->ImmediateFlipSupported);
dml_print("DML::%s: ImmediateFlipSupport = %d\n", __func__, mode_lib->vba.ImmediateFlipSupport);
dml_print("DML::%s: HostVMEnable = %d\n", __func__, mode_lib->vba.HostVMEnable);
dml_print("DML::%s: PrefetchAndImmediateFlipSupported = %d\n", __func__, locals->PrefetchAndImmediateFlipSupported);
dml_print("DML::%s: Done loop: Vstartup=%d, Max Vstartup=%d\n", __func__, locals->VStartupLines, locals->MaximumMaxVStartupLines);
#endif
v->VStartupLines = v->VStartupLines + 1;
if (v->VStartupLines > v->MaximumMaxVStartupLines) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Vstartup exceeds max vstartup, exiting loop\n", __func__);
#endif
break; // VBA_DELTA: implementation divergence - Gabe's reference VBA still iterates across prefetch modes here, which we intentionally do not do
}
iteration++;
if (iteration > 2500) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: too many errors, exit now\n", __func__);
assert(0);
#endif
}
} while (!(v->PrefetchAndImmediateFlipSupported || NextPrefetchMode > mode_lib->vba.MaxPrefetchMode));
if (v->VStartupLines <= v->MaximumMaxVStartupLines) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Good, Prefetch and flip scheduling found solution at VStartupLines=%d\n", __func__, locals->VStartupLines-1);
#endif
}
//Watermarks and NB P-State/DRAM Clock Change Support
{
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.UrgentLatency = v->UrgentLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.ExtraLatency = v->UrgentExtraLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.WritebackLatency = mode_lib->vba.WritebackLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.DRAMClockChangeLatency = mode_lib->vba.DRAMClockChangeLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.FCLKChangeLatency = mode_lib->vba.FCLKChangeLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SRExitTime = mode_lib->vba.SRExitTime;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SREnterPlusExitTime = mode_lib->vba.SREnterPlusExitTime;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SRExitZ8Time = mode_lib->vba.SRExitZ8Time;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SREnterPlusExitZ8Time = mode_lib->vba.SREnterPlusExitZ8Time;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.USRRetrainingLatency = mode_lib->vba.USRRetrainingLatency;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
v,
v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
v->DCFCLK,
v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
v->SOCCLK,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_fclkchange_support,
&v->MinActiveFCLKChangeLatencySupported,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_USRRetrainingSupport,
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
/* DCN32 has a new struct Watermarks (typedef) which is used to store
* calculated WM values. Copy over values from struct to vba variables
* to ensure that the DCN32 getters return the correct value.
*/
v->UrgentWatermark = v->Watermark.UrgentWatermark;
v->WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark;
v->DRAMClockChangeWatermark = v->Watermark.DRAMClockChangeWatermark;
v->WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark;
v->StutterExitWatermark = v->Watermark.StutterExitWatermark;
v->StutterEnterPlusExitWatermark = v->Watermark.StutterEnterPlusExitWatermark;
v->Z8StutterExitWatermark = v->Watermark.Z8StutterExitWatermark;
v->Z8StutterEnterPlusExitWatermark = v->Watermark.Z8StutterEnterPlusExitWatermark;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
v->WritebackAllowDRAMClockChangeEndPosition[k] = dml_max(0,
v->VStartup[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]
- v->Watermark.WritebackDRAMClockChangeWatermark);
v->WritebackAllowFCLKChangeEndPosition[k] = dml_max(0,
v->VStartup[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]
- v->Watermark.WritebackFCLKChangeWatermark);
} else {
v->WritebackAllowDRAMClockChangeEndPosition[k] = 0;
v->WritebackAllowFCLKChangeEndPosition[k] = 0;
}
}
}
//Display Pipeline Delivery Time in Prefetch, Groups
dml32_CalculatePixelDeliveryTimes(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
v->VRatioPrefetchY,
v->VRatioPrefetchC,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
mode_lib->vba.DPPCLK,
v->BytePerPixelC,
mode_lib->vba.SourceRotation,
mode_lib->vba.NumberOfCursors,
mode_lib->vba.CursorWidth,
mode_lib->vba.CursorBPP,
v->BlockWidth256BytesY,
v->BlockHeight256BytesY,
v->BlockWidth256BytesC,
v->BlockHeight256BytesC,
/* Output */
v->DisplayPipeLineDeliveryTimeLuma,
v->DisplayPipeLineDeliveryTimeChroma,
v->DisplayPipeLineDeliveryTimeLumaPrefetch,
v->DisplayPipeLineDeliveryTimeChromaPrefetch,
v->DisplayPipeRequestDeliveryTimeLuma,
v->DisplayPipeRequestDeliveryTimeChroma,
v->DisplayPipeRequestDeliveryTimeLumaPrefetch,
v->DisplayPipeRequestDeliveryTimeChromaPrefetch,
v->CursorRequestDeliveryTime,
v->CursorRequestDeliveryTimePrefetch);
dml32_CalculateMetaAndPTETimes(v->Use_One_Row_For_Frame,
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.MinMetaChunkSizeBytes,
mode_lib->vba.HTotal,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
v->DestinationLinesToRequestRowInVBlank,
v->DestinationLinesToRequestRowInImmediateFlip,
mode_lib->vba.DCCEnable,
mode_lib->vba.PixelClock,
v->BytePerPixelY,
v->BytePerPixelC,
mode_lib->vba.SourceRotation,
v->dpte_row_height,
v->dpte_row_height_chroma,
v->meta_row_width,
v->meta_row_width_chroma,
v->meta_row_height,
v->meta_row_height_chroma,
v->meta_req_width,
v->meta_req_width_chroma,
v->meta_req_height,
v->meta_req_height_chroma,
v->dpte_group_bytes,
v->PTERequestSizeY,
v->PTERequestSizeC,
v->PixelPTEReqWidthY,
v->PixelPTEReqHeightY,
v->PixelPTEReqWidthC,
v->PixelPTEReqHeightC,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
/* Output */
v->DST_Y_PER_PTE_ROW_NOM_L,
v->DST_Y_PER_PTE_ROW_NOM_C,
v->DST_Y_PER_META_ROW_NOM_L,
v->DST_Y_PER_META_ROW_NOM_C,
v->TimePerMetaChunkNominal,
v->TimePerChromaMetaChunkNominal,
v->TimePerMetaChunkVBlank,
v->TimePerChromaMetaChunkVBlank,
v->TimePerMetaChunkFlip,
v->TimePerChromaMetaChunkFlip,
v->time_per_pte_group_nom_luma,
v->time_per_pte_group_vblank_luma,
v->time_per_pte_group_flip_luma,
v->time_per_pte_group_nom_chroma,
v->time_per_pte_group_vblank_chroma,
v->time_per_pte_group_flip_chroma);
dml32_CalculateVMGroupAndRequestTimes(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HTotal,
v->BytePerPixelC,
v->DestinationLinesToRequestVMInVBlank,
v->DestinationLinesToRequestVMInImmediateFlip,
mode_lib->vba.DCCEnable,
mode_lib->vba.PixelClock,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->vm_group_bytes,
v->dpde0_bytes_per_frame_ub_l,
v->dpde0_bytes_per_frame_ub_c,
v->meta_pte_bytes_per_frame_ub_l,
v->meta_pte_bytes_per_frame_ub_c,
/* Output */
v->TimePerVMGroupVBlank,
v->TimePerVMGroupFlip,
v->TimePerVMRequestVBlank,
v->TimePerVMRequestFlip);
// Min TTUVBlank
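/* The TTU floor during VBlank depends on the prefetch mode selected for this state:
* mode 0 must cover the DRAM clock change, FCLK change, stutter and urgent watermarks,
* mode 1 drops the DRAM clock change watermark, mode 2 also drops FCLK change, and any
* other mode only needs to cover the urgent watermark.
*/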
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
v->MinTTUVBlank[k] = dml_max4(v->Watermark.DRAMClockChangeWatermark,
v->Watermark.FCLKChangeWatermark, v->Watermark.StutterEnterPlusExitWatermark,
v->Watermark.UrgentWatermark);
} else if (mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]
== 1) {
v->MinTTUVBlank[k] = dml_max3(v->Watermark.FCLKChangeWatermark,
v->Watermark.StutterEnterPlusExitWatermark, v->Watermark.UrgentWatermark);
} else if (mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]
== 2) {
v->MinTTUVBlank[k] = dml_max(v->Watermark.StutterEnterPlusExitWatermark,
v->Watermark.UrgentWatermark);
} else {
v->MinTTUVBlank[k] = v->Watermark.UrgentWatermark;
}
if (!mode_lib->vba.DynamicMetadataEnable[k])
v->MinTTUVBlank[k] = mode_lib->vba.TCalc + v->MinTTUVBlank[k];
}
// DCC Configuration
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Calculate DCC configuration for surface k=%d\n", __func__, k);
#endif
dml32_CalculateDCCConfiguration(
mode_lib->vba.DCCEnable[k],
mode_lib->vba.DCCProgrammingAssumesScanDirectionUnknownFinal,
mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceWidthY[k],
mode_lib->vba.SurfaceWidthC[k],
mode_lib->vba.SurfaceHeightY[k],
mode_lib->vba.SurfaceHeightC[k],
mode_lib->vba.nomDETInKByte,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
mode_lib->vba.SurfaceTiling[k],
v->BytePerPixelY[k],
v->BytePerPixelC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
(enum dm_rotation_angle) mode_lib->vba.SourceScan[k],
/* Output */
&v->DCCYMaxUncompressedBlock[k],
&v->DCCCMaxUncompressedBlock[k],
&v->DCCYMaxCompressedBlock[k],
&v->DCCCMaxCompressedBlock[k],
&v->DCCYIndependentBlock[k],
&v->DCCCIndependentBlock[k]);
}
// VStartup Adjustment
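/* Tvstartup_margin is the extra time gained by programming VStartup at its maximum value
* instead of the minimum found by the scheduling loop; fold it into MinTTUVBlank, Tdmdl
* and (when dynamic metadata with VM is enabled) Tdmdl_vm before deriving
* MIN_DST_Y_NEXT_START and VREADY_AT_OR_AFTER_VSYNC.
*/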
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
bool isInterlaceTiming;
double Tvstartup_margin = (v->MaxVStartupLines[k] - v->VStartup[k]) * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k,
v->MinTTUVBlank[k]);
#endif
v->MinTTUVBlank[k] = v->MinTTUVBlank[k] + Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, Tvstartup_margin = %f\n", __func__, k, Tvstartup_margin);
dml_print("DML::%s: k=%d, MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, MinTTUVBlank = %f\n", __func__, k, v->MinTTUVBlank[k]);
#endif
v->Tdmdl[k] = v->Tdmdl[k] + Tvstartup_margin;
if (mode_lib->vba.DynamicMetadataEnable[k] && mode_lib->vba.DynamicMetadataVMEnabled)
v->Tdmdl_vm[k] = v->Tdmdl_vm[k] + Tvstartup_margin;
isInterlaceTiming = (mode_lib->vba.Interlace[k] &&
!mode_lib->vba.ProgressiveToInterlaceUnitInOPP);
v->MIN_DST_Y_NEXT_START[k] = ((isInterlaceTiming ? dml_floor((mode_lib->vba.VTotal[k] -
mode_lib->vba.VFrontPorch[k]) / 2.0, 1.0) :
mode_lib->vba.VTotal[k]) - mode_lib->vba.VFrontPorch[k])
+ dml_max(1.0,
dml_ceil(v->WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0))
+ dml_floor(4.0 * v->TSetup[k] / (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]), 1.0) / 4.0;
v->VStartup[k] = (isInterlaceTiming ? (2 * v->MaxVStartupLines[k]) : v->MaxVStartupLines[k]);
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k])
/ mode_lib->vba.HTotal[k]) <= (isInterlaceTiming ? dml_floor((mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k] - mode_lib->vba.VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
(int) (mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- mode_lib->vba.VFrontPorch[k] - v->VStartup[k]))) {
v->VREADY_AT_OR_AFTER_VSYNC[k] = true;
} else {
v->VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, VStartup = %d (max)\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, VUpdateOffsetPix = %d\n", __func__, k, v->VUpdateOffsetPix[k]);
dml_print("DML::%s: k=%d, VUpdateWidthPix = %d\n", __func__, k, v->VUpdateWidthPix[k]);
dml_print("DML::%s: k=%d, VReadyOffsetPix = %d\n", __func__, k, v->VReadyOffsetPix[k]);
dml_print("DML::%s: k=%d, HTotal = %d\n", __func__, k, mode_lib->vba.HTotal[k]);
dml_print("DML::%s: k=%d, VTotal = %d\n", __func__, k, mode_lib->vba.VTotal[k]);
dml_print("DML::%s: k=%d, VActive = %d\n", __func__, k, mode_lib->vba.VActive[k]);
dml_print("DML::%s: k=%d, VFrontPorch = %d\n", __func__, k, mode_lib->vba.VFrontPorch[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, TSetup = %f\n", __func__, k, v->TSetup[k]);
dml_print("DML::%s: k=%d, MIN_DST_Y_NEXT_START = %f\n", __func__, k, v->MIN_DST_Y_NEXT_START[k]);
dml_print("DML::%s: k=%d, VREADY_AT_OR_AFTER_VSYNC = %d\n", __func__, k,
v->VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
{
//Maximum Bandwidth Used
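/* Writeback bandwidth per surface: destination width x height divided by the time spanned
* by the writeback source (source height x line time), scaled by bytes per writeback pixel
* (4 for dm_444_32, 8 otherwise). Accumulate only for surfaces with writeback enabled so a
* stale WRBandwidth value is never re-added.
*/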
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
WRBandwidth = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.HTotal[k] * mode_lib->vba.WritebackSourceHeight[k]
/ mode_lib->vba.PixelClock[k]) * 4;
TotalWRBandwidth = TotalWRBandwidth + WRBandwidth;
} else if (mode_lib->vba.WritebackEnable[k] == true) {
WRBandwidth = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.HTotal[k] * mode_lib->vba.WritebackSourceHeight[k]
/ mode_lib->vba.PixelClock[k]) * 8;
TotalWRBandwidth = TotalWRBandwidth + WRBandwidth;
}
}
v->TotalDataReadBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->TotalDataReadBandwidth = v->TotalDataReadBandwidth + v->ReadBandwidthSurfaceLuma[k]
+ v->ReadBandwidthSurfaceChroma[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, TotalDataReadBandwidth = %f\n",
__func__, k, v->TotalDataReadBandwidth);
dml_print("DML::%s: k=%d, ReadBandwidthSurfaceLuma = %f\n",
__func__, k, v->ReadBandwidthSurfaceLuma[k]);
dml_print("DML::%s: k=%d, ReadBandwidthSurfaceChroma = %f\n",
__func__, k, v->ReadBandwidthSurfaceChroma[k]);
#endif
}
}
// Stutter Efficiency
dml32_CalculateStutterEfficiency(v->CompressedBufferSizeInkByte,
mode_lib->vba.UsesMALLForPStateChange,
v->UnboundedRequestEnabled,
mode_lib->vba.MetaFIFOSizeInKEntries,
mode_lib->vba.ZeroSizeBufferEntries,
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ROBBufferSizeInKByte,
v->TotalDataReadBandwidth,
mode_lib->vba.DCFCLK,
mode_lib->vba.ReturnBW,
v->CompbufReservedSpace64B,
v->CompbufReservedSpaceZs,
mode_lib->vba.SRExitTime,
mode_lib->vba.SRExitZ8Time,
mode_lib->vba.SynchronizeTimingsFinal,
mode_lib->vba.BlendingAndTiming,
v->Watermark.StutterEnterPlusExitWatermark,
v->Watermark.Z8StutterEnterPlusExitWatermark,
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.Interlace,
v->MinTTUVBlank, mode_lib->vba.DPPPerPlane,
mode_lib->vba.DETBufferSizeY,
v->BytePerPixelY,
v->BytePerPixelDETY,
v->SwathWidthY,
mode_lib->vba.SwathHeightY,
mode_lib->vba.SwathHeightC,
mode_lib->vba.DCCRateLuma,
mode_lib->vba.DCCRateChroma,
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma,
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma,
mode_lib->vba.HTotal, mode_lib->vba.VTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.SourceRotation,
v->BlockHeight256BytesY,
v->BlockWidth256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesC,
v->DCCYMaxUncompressedBlock,
v->DCCCMaxUncompressedBlock,
mode_lib->vba.VActive,
mode_lib->vba.DCCEnable,
mode_lib->vba.WritebackEnable,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->meta_row_bw,
v->dpte_row_bw,
/* Output */
&v->StutterEfficiencyNotIncludingVBlank,
&v->StutterEfficiency,
&v->NumberOfStutterBurstsPerFrame,
&v->Z8StutterEfficiencyNotIncludingVBlank,
&v->Z8StutterEfficiency,
&v->Z8NumberOfStutterBurstsPerFrame,
&v->StutterPeriod,
&v->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#ifdef __DML_VBA_ALLOW_DELTA__
{
unsigned int dummy_integer[1];
// Calculate z8 stutter eff assuming 0 reserved space
dml32_CalculateStutterEfficiency(v->CompressedBufferSizeInkByte,
mode_lib->vba.UsesMALLForPStateChange,
v->UnboundedRequestEnabled,
mode_lib->vba.MetaFIFOSizeInKEntries,
mode_lib->vba.ZeroSizeBufferEntries,
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ROBBufferSizeInKByte,
v->TotalDataReadBandwidth,
mode_lib->vba.DCFCLK,
mode_lib->vba.ReturnBW,
0, //CompbufReservedSpace64B,
0, //CompbufReservedSpaceZs,
mode_lib->vba.SRExitTime,
mode_lib->vba.SRExitZ8Time,
mode_lib->vba.SynchronizeTimingsFinal,
mode_lib->vba.BlendingAndTiming,
v->Watermark.StutterEnterPlusExitWatermark,
v->Watermark.Z8StutterEnterPlusExitWatermark,
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.Interlace,
v->MinTTUVBlank,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.DETBufferSizeY,
v->BytePerPixelY, v->BytePerPixelDETY,
v->SwathWidthY, mode_lib->vba.SwathHeightY,
mode_lib->vba.SwathHeightC,
mode_lib->vba.DCCRateLuma,
mode_lib->vba.DCCRateChroma,
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma,
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma,
mode_lib->vba.HTotal,
mode_lib->vba.VTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.SourceRotation,
v->BlockHeight256BytesY,
v->BlockWidth256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesC,
v->DCCYMaxUncompressedBlock,
v->DCCCMaxUncompressedBlock,
mode_lib->vba.VActive,
mode_lib->vba.DCCEnable,
mode_lib->vba.WritebackEnable,
v->ReadBandwidthSurfaceLuma,
v->ReadBandwidthSurfaceChroma,
v->meta_row_bw, v->dpte_row_bw,
/* Output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0],
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[1],
&dummy_integer[0],
&v->Z8StutterEfficiencyNotIncludingVBlankBestCase,
&v->Z8StutterEfficiencyBestCase,
&v->Z8NumberOfStutterBurstsPerFrameBestCase,
&v->StutterPeriodBestCase,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_boolean);
}
#else
v->Z8StutterEfficiencyNotIncludingVBlankBestCase = v->Z8StutterEfficiencyNotIncludingVBlank;
v->Z8StutterEfficiencyBestCase = v->Z8StutterEfficiency;
v->Z8NumberOfStutterBurstsPerFrameBestCase = v->Z8NumberOfStutterBurstsPerFrame;
v->StutterPeriodBestCase = v->StutterPeriod;
#endif
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: --- END ---\n", __func__);
#endif
}
static void mode_support_configuration(struct vba_vars_st *v,
struct display_mode_lib *mode_lib)
{
int i, j, start_state;
if (mode_lib->validate_max_state)
start_state = v->soc.num_states - 1;
else
start_state = 0;
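/* A state/MPC-combine pair is supported only when every capability, bandwidth and latency
* check computed earlier passes; ModeSupport[i][j] is the logical AND of all of those
* flags, with a few checks waived at the highest voltage state.
*/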
for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (mode_lib->vba.ScaleRatioAndTapsSupport == true
&& mode_lib->vba.SourceFormatPixelAndScanSupport == true
&& mode_lib->vba.ViewportSizeSupport[i][j] == true
&& !mode_lib->vba.LinkRateDoesNotMatchDPVersion
&& !mode_lib->vba.LinkRateForMultistreamNotIndicated
&& !mode_lib->vba.BPPForMultistreamNotIndicated
&& !mode_lib->vba.MultistreamWithHDMIOreDP
&& !mode_lib->vba.ExceededMultistreamSlots[i]
&& !mode_lib->vba.MSOOrODMSplitWithNonDPLink
&& !mode_lib->vba.NotEnoughLanesForMSO
&& mode_lib->vba.LinkCapacitySupport[i] == true && !mode_lib->vba.P2IWith420
//&& !mode_lib->vba.DSCOnlyIfNecessaryWithBPP
&& !mode_lib->vba.DSC422NativeNotSupported
&& !mode_lib->vba.MPCCombineMethodIncompatible
&& mode_lib->vba.ODMCombine2To1SupportCheckOK[i] == true
&& mode_lib->vba.ODMCombine4To1SupportCheckOK[i] == true
&& mode_lib->vba.NotEnoughDSCUnits[i] == false
&& !mode_lib->vba.NotEnoughDSCSlices[i]
&& !mode_lib->vba.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe
&& !mode_lib->vba.InvalidCombinationOfMALLUseForPStateAndStaticScreen
&& mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
&& mode_lib->vba.PixelsPerLinePerDSCUnitSupport[i]
&& mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] == false
&& !mode_lib->vba.InvalidCombinationOfMALLUseForPState
&& !mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified
&& mode_lib->vba.ROBSupport[i][j] == true
&& mode_lib->vba.DISPCLK_DPPCLK_Support[i][j] == true
&& mode_lib->vba.TotalAvailablePipesSupport[i][j] == true
&& mode_lib->vba.NumberOfOTGSupport == true
&& mode_lib->vba.NumberOfHDMIFRLSupport == true
&& mode_lib->vba.EnoughWritebackUnits == true
&& mode_lib->vba.WritebackLatencySupport == true
&& mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
&& mode_lib->vba.CursorSupport == true && mode_lib->vba.PitchSupport == true
&& mode_lib->vba.ViewportExceedsSurface == false
&& mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VActiveBandwithSupport[i][j] == true
&& mode_lib->vba.DynamicMetadataSupported[i][j] == true
&& mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true
&& mode_lib->vba.PTEBufferSizeNotExceeded[i][j] == true
&& mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] == true
&& mode_lib->vba.NonsupportedDSCInputBPC == false
&& !mode_lib->vba.ExceededMALLSize
&& (mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] == false
|| i == v->soc.num_states - 1)
&& ((mode_lib->vba.HostVMEnable == false
&& !mode_lib->vba.ImmediateFlipRequiredFinal)
|| mode_lib->vba.ImmediateFlipSupportedForState[i][j])
&& (!mode_lib->vba.DRAMClockChangeRequirementFinal
|| i == v->soc.num_states - 1
|| mode_lib->vba.DRAMClockChangeSupport[i][j] != dm_dram_clock_change_unsupported)
&& (!mode_lib->vba.FCLKChangeRequirementFinal || i == v->soc.num_states - 1
|| mode_lib->vba.FCLKChangeSupport[i][j] != dm_fclock_change_unsupported)
&& (!mode_lib->vba.USRRetrainingRequiredFinal
|| mode_lib->vba.USRRetrainingSupport[i][j])) {
mode_lib->vba.ModeSupport[i][j] = true;
} else {
mode_lib->vba.ModeSupport[i][j] = false;
}
}
}
}
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
int i, j, start_state;
unsigned int k, m;
unsigned int MaximumMPCCombine;
unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
unsigned int TotalSlots;
bool CompBufReservedSpaceNeedAdjustment;
bool CompBufReservedSpaceNeedAdjustmentSingleDPP;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: called\n", __func__);
#endif
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
if (mode_lib->validate_max_state)
start_state = v->soc.num_states - 1;
else
start_state = 0;
/*Scale Ratio, taps Support Check*/
mode_lib->vba.ScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.ScalerEnabled[k] == false
&& ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe_alpha)
|| mode_lib->vba.HRatio[k] != 1.0 || mode_lib->vba.htaps[k] != 1.0
|| mode_lib->vba.VRatio[k] != 1.0 || mode_lib->vba.vtaps[k] != 1.0)) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
} else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0 || mode_lib->vba.htaps[k] < 1.0
|| mode_lib->vba.htaps[k] > 8.0
|| (mode_lib->vba.htaps[k] > 1.0 && (mode_lib->vba.htaps[k] % 2) == 1)
|| mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
|| mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
|| mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
|| mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
|| (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe
&& (mode_lib->vba.VTAPsChroma[k] < 1
|| mode_lib->vba.VTAPsChroma[k] > 8
|| mode_lib->vba.HTAPsChroma[k] < 1
|| mode_lib->vba.HTAPsChroma[k] > 8
|| (mode_lib->vba.HTAPsChroma[k] > 1
&& mode_lib->vba.HTAPsChroma[k] % 2
== 1)
|| mode_lib->vba.HRatioChroma[k]
> mode_lib->vba.MaxHSCLRatio
|| mode_lib->vba.VRatioChroma[k]
> mode_lib->vba.MaxVSCLRatio
|| mode_lib->vba.HRatioChroma[k]
> mode_lib->vba.HTAPsChroma[k]
|| mode_lib->vba.VRatioChroma[k]
> mode_lib->vba.VTAPsChroma[k]))) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
mode_lib->vba.SourceFormatPixelAndScanSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
&& (IsVertical((enum dm_rotation_angle) mode_lib->vba.SourceScan[k])
|| mode_lib->vba.DCCEnable[k] == true)) {
mode_lib->vba.SourceFormatPixelAndScanSupport = false;
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
dml32_CalculateBytePerPixelAndBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
/* Output */
&mode_lib->vba.BytePerPixelY[k],
&mode_lib->vba.BytePerPixelC[k],
&mode_lib->vba.BytePerPixelInDETY[k],
&mode_lib->vba.BytePerPixelInDETC[k],
&mode_lib->vba.Read256BlockHeightY[k],
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
&mode_lib->vba.MacroTileHeightY[k],
&mode_lib->vba.MacroTileHeightC[k],
&mode_lib->vba.MacroTileWidthY[k],
&mode_lib->vba.MacroTileWidthC[k]);
}
/*Bandwidth Support Check*/
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!IsVertical(mode_lib->vba.SourceRotation[k])) {
v->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
v->SwathWidthCSingleDPP[k] = mode_lib->vba.ViewportWidthChroma[k];
} else {
v->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
v->SwathWidthCSingleDPP[k] = mode_lib->vba.ViewportHeightChroma[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
v->ReadBandwidthLuma[k] = v->SwathWidthYSingleDPP[k] * dml_ceil(v->BytePerPixelInDETY[k], 1.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
v->ReadBandwidthChroma[k] = v->SwathWidthYSingleDPP[k] / 2 * dml_ceil(v->BytePerPixelInDETC[k], 2.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]
/ 2.0;
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true && mode_lib->vba.WritebackPixelFormat[k] == dm_444_64) {
v->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 8.0;
} else if (mode_lib->vba.WritebackEnable[k] == true) {
v->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 4.0;
} else {
v->WriteBandwidth[k] = 0.0;
}
}
/*Writeback Latency support check*/
mode_lib->vba.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& (v->WriteBandwidth[k]
> mode_lib->vba.WritebackInterfaceBufferSize * 1024
/ mode_lib->vba.WritebackLatency)) {
mode_lib->vba.WritebackLatencySupport = false;
}
}
/*Writeback Mode Support Check*/
mode_lib->vba.EnoughWritebackUnits = true;
mode_lib->vba.TotalNumberOfActiveWriteback = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true)
mode_lib->vba.TotalNumberOfActiveWriteback = mode_lib->vba.TotalNumberOfActiveWriteback + 1;
}
if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback)
mode_lib->vba.EnoughWritebackUnits = false;
/*Writeback Scale Ratio and Taps Support Check*/
mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
|| mode_lib->vba.WritebackVRatio[k] > mode_lib->vba.WritebackMaxVSCLRatio
|| mode_lib->vba.WritebackHRatio[k] < mode_lib->vba.WritebackMinHSCLRatio
|| mode_lib->vba.WritebackVRatio[k] < mode_lib->vba.WritebackMinVSCLRatio
|| mode_lib->vba.WritebackHTaps[k] > mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackVTaps[k] > mode_lib->vba.WritebackMaxVSCLTaps
|| mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackHTaps[k]
|| mode_lib->vba.WritebackVRatio[k] > mode_lib->vba.WritebackVTaps[k]
|| (mode_lib->vba.WritebackHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackHTaps[k] % 2) == 1))) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * mode_lib->vba.WritebackDestinationWidth[k] * (mode_lib->vba.WritebackVTaps[k] - 1)
* 57 > mode_lib->vba.WritebackLineBufferSize) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(mode_lib->vba.HRatio[k], mode_lib->vba.HRatioChroma[k],
mode_lib->vba.VRatio[k], mode_lib->vba.VRatioChroma[k],
mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput,
mode_lib->vba.PixelClock[k], mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.htaps[k], mode_lib->vba.HTAPsChroma[k], mode_lib->vba.vtaps[k],
mode_lib->vba.VTAPsChroma[k],
/* Output */
&mode_lib->vba.PSCL_FACTOR[k], &mode_lib->vba.PSCL_FACTOR_CHROMA[k],
&mode_lib->vba.MinDPPCLKUsingSingleDPP[k]);
}
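/* The maximum supported luma swath width depends on tiling, scan rotation, the presence of
* a chroma plane, pixel format and DCC; chroma is limited to half of the luma limit for
* 4:2:0 formats, and both limits are further clamped by line buffer capacity.
*/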
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 8192;
} else if (!IsVertical(mode_lib->vba.SourceRotation[k]) && v->BytePerPixelC[k] > 0
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe_alpha) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 7680;
} else if (IsVertical(mode_lib->vba.SourceRotation[k]) && v->BytePerPixelC[k] > 0
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe_alpha) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 4320;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_rgbe_alpha) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 3840;
} else if (IsVertical(mode_lib->vba.SourceRotation[k]) && v->BytePerPixelY[k] == 8 &&
mode_lib->vba.DCCEnable[k] == true) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 3072;
} else {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma = 6144;
}
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8 || mode_lib->vba.SourcePixelFormat[k] == dm_420_10
|| mode_lib->vba.SourcePixelFormat[k] == dm_420_12) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportChroma = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma / 2.0;
} else {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportChroma = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma;
}
v->MaximumSwathWidthInLineBufferLuma = mode_lib->vba.LineBufferSizeFinal
* dml_max(mode_lib->vba.HRatio[k], 1.0) / mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.vtaps[k] + dml_max(dml_ceil(mode_lib->vba.VRatio[k], 1.0) - 2, 0.0));
if (v->BytePerPixelC[k] == 0.0) {
v->MaximumSwathWidthInLineBufferChroma = 0;
} else {
v->MaximumSwathWidthInLineBufferChroma = mode_lib->vba.LineBufferSizeFinal
* dml_max(mode_lib->vba.HRatioChroma[k], 1.0) / mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.VTAPsChroma[k]
+ dml_max(dml_ceil(mode_lib->vba.VRatioChroma[k], 1.0) - 2,
0.0));
}
v->MaximumSwathWidthLuma[k] = dml_min(v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportLuma,
v->MaximumSwathWidthInLineBufferLuma);
v->MaximumSwathWidthChroma[k] = dml_min(v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaximumSwathWidthSupportChroma,
v->MaximumSwathWidthInLineBufferChroma);
}
dml32_CalculateSwathAndDETConfiguration(
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
mode_lib->vba.MaxTotalDETInKByte,
mode_lib->vba.MinCompressedBufferSizeInKByte,
1, /* ForceSingleDPP */
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.nomDETInKByte,
mode_lib->vba.UseUnboundedRequesting,
mode_lib->vba.DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment,
mode_lib->vba.ip.pixel_chunk_size_kbytes,
mode_lib->vba.ip.rob_buffer_size_kbytes,
mode_lib->vba.CompressedBufferSegmentSizeInkByteFinal,
mode_lib->vba.Output,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.MaximumSwathWidthLuma,
mode_lib->vba.MaximumSwathWidthChroma,
mode_lib->vba.SourceRotation,
mode_lib->vba.ViewportStationary,
mode_lib->vba.SourcePixelFormat,
mode_lib->vba.SurfaceTiling,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
mode_lib->vba.Read256BlockWidthY,
mode_lib->vba.Read256BlockWidthC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_odm_mode,
mode_lib->vba.BlendingAndTiming,
mode_lib->vba.BytePerPixelY,
mode_lib->vba.BytePerPixelC,
mode_lib->vba.BytePerPixelInDETY,
mode_lib->vba.BytePerPixelInDETC,
mode_lib->vba.HActive,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[0], /* Integer DPPPerSurface[] */
/* Output */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[1], /* Long swath_width_luma_ub[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[2], /* Long swath_width_chroma_ub[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_double_array[0], /* Long SwathWidth[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_double_array[1], /* Long SwathWidthChroma[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[3], /* Integer SwathHeightY[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[4], /* Integer SwathHeightC[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[5], /* Long DETBufferSizeInKByte[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[6], /* Long DETBufferSizeY[] */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[7], /* Long DETBufferSizeC[] */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean_array[0][0], /* bool *UnboundedRequestEnabled */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[0][0], /* Long *CompressedBufferSizeInkByte */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[1][0], /* Long *CompBufReservedSpaceKBytes */
&CompBufReservedSpaceNeedAdjustmentSingleDPP,
mode_lib->vba.SingleDPPViewportSizeSupportPerSurface,/* bool ViewportSizeSupportPerSurface[] */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean_array[1][0]); /* bool *ViewportSizeSupport */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.MPCCombineUse[k] == dm_mpc_reduce_voltage_and_clocks)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage = true;
if (mode_lib->vba.MPCCombineUse[k] == dm_mpc_always_when_possible)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible = true;
}
mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeNoDSC = dm_odm_combine_mode_disabled;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeDSC = dm_odm_combine_mode_disabled;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
mode_lib->vba.MaxDispclk[v->soc.num_states - 1],
false,
mode_lib->vba.TotalNumberOfActiveDPP[i][j],
mode_lib->vba.MaxNumDPP,
mode_lib->vba.PixelClock[k],
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportNoDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NumberOfDPPNoDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeNoDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.RequiredDISPCLKPerSurfaceNoDSC);
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
mode_lib->vba.MaxDispclk[v->soc.num_states - 1],
true,
mode_lib->vba.TotalNumberOfActiveDPP[i][j],
mode_lib->vba.MaxNumDPP,
mode_lib->vba.PixelClock[k],
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NumberOfDPPDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeDSC,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.RequiredDISPCLKPerSurfaceDSC);
dml32_CalculateOutputLink(
mode_lib->vba.PHYCLKPerState[i],
mode_lib->vba.PHYCLKD18PerState[i],
mode_lib->vba.PHYCLKD32PerState[i],
mode_lib->vba.Downspreading,
(mode_lib->vba.BlendingAndTiming[k] == k),
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.HActive[k],
mode_lib->vba.PixelClockBackEnd[k],
mode_lib->vba.ForcedOutputLinkBPP[k],
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.NumberOfDSCSlices[k],
mode_lib->vba.AudioSampleRate[k],
mode_lib->vba.AudioSampleLayout[k],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeNoDSC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeDSC,
mode_lib->vba.DSCEnable[k],
mode_lib->vba.OutputLinkDPLanes[k],
mode_lib->vba.OutputLinkDPRate[k],
/* Output */
&mode_lib->vba.RequiresDSC[i][k],
&mode_lib->vba.RequiresFEC[i][k],
&mode_lib->vba.OutputBppPerState[i][k],
&mode_lib->vba.OutputTypePerState[i][k],
&mode_lib->vba.OutputRatePerState[i][k],
&mode_lib->vba.RequiredSlots[i][k]);
if (mode_lib->vba.RequiresDSC[i][k] == false) {
mode_lib->vba.ODMCombineEnablePerState[i][k] = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeNoDSC;
mode_lib->vba.RequiredDISPCLKPerSurface[i][j][k] =
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.RequiredDISPCLKPerSurfaceNoDSC;
if (!v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportNoDSC)
mode_lib->vba.TotalAvailablePipesSupport[i][j] = false;
mode_lib->vba.TotalNumberOfActiveDPP[i][j] =
mode_lib->vba.TotalNumberOfActiveDPP[i][j] + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NumberOfDPPNoDSC;
} else {
mode_lib->vba.ODMCombineEnablePerState[i][k] = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ODMModeDSC;
mode_lib->vba.RequiredDISPCLKPerSurface[i][j][k] =
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.RequiredDISPCLKPerSurfaceDSC;
if (!v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportDSC)
mode_lib->vba.TotalAvailablePipesSupport[i][j] = false;
mode_lib->vba.TotalNumberOfActiveDPP[i][j] =
mode_lib->vba.TotalNumberOfActiveDPP[i][j] + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NumberOfDPPDSC;
}
}
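/* Decide DPPs per plane: ODM combine fixes the count (4:1 -> 4, 2:1 -> 2); otherwise use a
* single DPP when MPC combine is disallowed or when one DPP meets the required DPPCLK and
* viewport limits, fall back to MPC combine (2 DPPs) if spare pipes remain, and mark the
* configuration unsupported when no pipes are left.
*/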
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
mode_lib->vba.MPCCombine[i][j][k] = false;
mode_lib->vba.NoOfDPP[i][j][k] = 4;
} else if (mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.MPCCombine[i][j][k] = false;
mode_lib->vba.NoOfDPP[i][j][k] = 2;
} else if (mode_lib->vba.MPCCombineUse[k] == dm_mpc_never) {
mode_lib->vba.MPCCombine[i][j][k] = false;
mode_lib->vba.NoOfDPP[i][j][k] = 1;
} else if (dml32_RoundToDFSGranularity(
mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100), 1,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed) <= mode_lib->vba.MaxDppclk[i] &&
mode_lib->vba.SingleDPPViewportSizeSupportPerSurface[k] == true) {
mode_lib->vba.MPCCombine[i][j][k] = false;
mode_lib->vba.NoOfDPP[i][j][k] = 1;
} else if (mode_lib->vba.TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP) {
mode_lib->vba.MPCCombine[i][j][k] = true;
mode_lib->vba.NoOfDPP[i][j][k] = 2;
mode_lib->vba.TotalNumberOfActiveDPP[i][j] =
mode_lib->vba.TotalNumberOfActiveDPP[i][j] + 1;
} else {
mode_lib->vba.MPCCombine[i][j][k] = false;
mode_lib->vba.NoOfDPP[i][j][k] = 1;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = false;
}
}
mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] = 0;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NoChroma = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.NoOfDPP[i][j][k] == 1)
mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] =
mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] + 1;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
|| mode_lib->vba.SourcePixelFormat[k] == dm_420_10
|| mode_lib->vba.SourcePixelFormat[k] == dm_420_12
|| mode_lib->vba.SourcePixelFormat[k] == dm_rgbe_alpha) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NoChroma = false;
}
}
// If TotalNumberOfActiveDPP > 1, unbounded request mode is not allowed (hw limitation), so the comp buf reserved-space adjustment is not needed regardless
// If TotalNumberOfActiveDPP == 1, use the single-DPP unbounded request result to decide whether the adjustment is needed
CompBufReservedSpaceNeedAdjustment = (mode_lib->vba.TotalNumberOfActiveDPP[i][j] > 1) ? 0 : CompBufReservedSpaceNeedAdjustmentSingleDPP;
if (j == 1 && !dml32_UnboundedRequest(mode_lib->vba.UseUnboundedRequesting,
mode_lib->vba.TotalNumberOfActiveDPP[i][j], v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NoChroma,
mode_lib->vba.Output[0],
mode_lib->vba.SurfaceTiling[0],
CompBufReservedSpaceNeedAdjustment,
mode_lib->vba.DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment)) {
while (!(mode_lib->vba.TotalNumberOfActiveDPP[i][j] >= mode_lib->vba.MaxNumDPP
|| mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] == 0)) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.BWOfNonCombinedSurfaceOfMaximumBandwidth = 0;
NumberOfNonCombinedSurfaceOfMaximumBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.MPCCombineUse[k]
!= dm_mpc_never &&
mode_lib->vba.MPCCombineUse[k] != dm_mpc_reduce_voltage &&
mode_lib->vba.ReadBandwidthLuma[k] +
mode_lib->vba.ReadBandwidthChroma[k] >
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.BWOfNonCombinedSurfaceOfMaximumBandwidth &&
(mode_lib->vba.ODMCombineEnablePerState[i][k] !=
dm_odm_combine_mode_2to1 &&
mode_lib->vba.ODMCombineEnablePerState[i][k] !=
dm_odm_combine_mode_4to1) &&
mode_lib->vba.MPCCombine[i][j][k] == false) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.BWOfNonCombinedSurfaceOfMaximumBandwidth =
mode_lib->vba.ReadBandwidthLuma[k]
+ mode_lib->vba.ReadBandwidthChroma[k];
NumberOfNonCombinedSurfaceOfMaximumBandwidth = k;
}
}
mode_lib->vba.MPCCombine[i][j][NumberOfNonCombinedSurfaceOfMaximumBandwidth] =
true;
mode_lib->vba.NoOfDPP[i][j][NumberOfNonCombinedSurfaceOfMaximumBandwidth] = 2;
mode_lib->vba.TotalNumberOfActiveDPP[i][j] =
mode_lib->vba.TotalNumberOfActiveDPP[i][j] + 1;
mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] =
mode_lib->vba.TotalNumberOfSingleDPPSurfaces[i][j] - 1;
}
}
//DISPCLK/DPPCLK
mode_lib->vba.WritebackRequiredDISPCLK = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.WritebackEnable[k]) {
mode_lib->vba.WritebackRequiredDISPCLK = dml_max(
mode_lib->vba.WritebackRequiredDISPCLK,
dml32_CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackHTaps[k],
mode_lib->vba.WritebackVTaps[k],
mode_lib->vba.WritebackSourceWidth[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackLineBufferSize,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed));
}
}
mode_lib->vba.RequiredDISPCLK[i][j] = mode_lib->vba.WritebackRequiredDISPCLK;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.RequiredDISPCLK[i][j] = dml_max(mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.RequiredDISPCLKPerSurface[i][j][k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
mode_lib->vba.NoOfDPPThisState[k] = mode_lib->vba.NoOfDPP[i][j][k];
dml32_CalculateDPPCLK(mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed, mode_lib->vba.MinDPPCLKUsingSingleDPP,
mode_lib->vba.NoOfDPPThisState,
/* Output */
&mode_lib->vba.GlobalDPPCLK, mode_lib->vba.RequiredDPPCLKThisState);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k)
mode_lib->vba.RequiredDPPCLK[i][j][k] = mode_lib->vba.RequiredDPPCLKThisState[k];
mode_lib->vba.DISPCLK_DPPCLK_Support[i][j] = !((mode_lib->vba.RequiredDISPCLK[i][j]
> mode_lib->vba.MaxDispclk[i])
|| (mode_lib->vba.GlobalDPPCLK > mode_lib->vba.MaxDppclk[i]));
if (mode_lib->vba.TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP)
mode_lib->vba.TotalAvailablePipesSupport[i][j] = false;
} // j
} // i (VOLTAGE_STATE)
/* Total Available OTG, HDMIFRL, DP Support Check */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveOTG = 0;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveHDMIFRL = 0;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0 = 0;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0Outputs = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveOTG = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveOTG + 1;
if (mode_lib->vba.Output[k] == dm_dp2p0) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0 = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0 + 1;
if (mode_lib->vba.OutputMultistreamId[k]
== k || mode_lib->vba.OutputMultistreamEn[k] == false) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0Outputs = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0Outputs + 1;
}
}
}
}
mode_lib->vba.NumberOfOTGSupport = (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG);
mode_lib->vba.NumberOfHDMIFRLSupport = (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveHDMIFRL <= mode_lib->vba.MaxNumHDMIFRLOutputs);
mode_lib->vba.NumberOfDP2p0Support = (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0 <= mode_lib->vba.MaxNumDP2p0Streams
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalNumberOfActiveDP2p0Outputs <= mode_lib->vba.MaxNumDP2p0Outputs);
/* Display IO and DSC Support Check */
mode_lib->vba.NonsupportedDSCInputBPC = false;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
|| mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
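/* DP MST payload check: for each primary stream, sum the required time slots of every
* stream sharing its link; at most 63 slots are allowed on a dm_dp link and 64 on a
* dm_dp2p0 link. The same loop also flags streams whose link has no valid output bpp.
*/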
for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ExceededMultistreamSlots[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) {
TotalSlots = mode_lib->vba.RequiredSlots[i][k];
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamId[j] == k)
TotalSlots = TotalSlots + mode_lib->vba.RequiredSlots[i][j];
}
if (mode_lib->vba.Output[k] == dm_dp && TotalSlots > 63)
mode_lib->vba.ExceededMultistreamSlots[i] = true;
if (mode_lib->vba.Output[k] == dm_dp2p0 && TotalSlots > 64)
mode_lib->vba.ExceededMultistreamSlots[i] = true;
}
}
mode_lib->vba.LinkCapacitySupport[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
&& (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
|| mode_lib->vba.Output[k] == dm_edp
|| mode_lib->vba.Output[k] == dm_hdmi)
&& mode_lib->vba.OutputBppPerState[i][k] == 0 &&
(mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) {
/* Phantom pipes don't consider DSC in DML, so it could fail link check.
* However, we don't care about the link for phantom pipes.
*/
mode_lib->vba.LinkCapacitySupport[i] = false;
}
}
}
mode_lib->vba.P2IWith420 = false;
mode_lib->vba.DSCOnlyIfNecessaryWithBPP = false;
mode_lib->vba.DSC422NativeNotSupported = false;
mode_lib->vba.LinkRateDoesNotMatchDPVersion = false;
mode_lib->vba.LinkRateForMultistreamNotIndicated = false;
mode_lib->vba.BPPForMultistreamNotIndicated = false;
mode_lib->vba.MultistreamWithHDMIOreDP = false;
mode_lib->vba.MSOOrODMSplitWithNonDPLink = false;
mode_lib->vba.NotEnoughLanesForMSO = false;
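	/* Flag unsupported output combinations: P2I with 4:2:0, forced link bpp together
	 * with DSC, native 4:2:2 DSC without HW support, link rates that do not match the
	 * encoder type (HBRx only on DP/eDP, UHBRx only on DP2.0), MST streams missing a
	 * link rate or bpp, MST on HDMI/eDP, MSO/ODM split on a non-DP link, and MSO with
	 * too few DP lanes.
	 */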
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
&& (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
|| mode_lib->vba.Output[k] == dm_edp
|| mode_lib->vba.Output[k] == dm_hdmi)) {
if (mode_lib->vba.OutputFormat[k]
== dm_420 && mode_lib->vba.Interlace[k] == 1 &&
mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)
mode_lib->vba.P2IWith420 = true;
if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.ForcedOutputLinkBPP[k] != 0)
mode_lib->vba.DSCOnlyIfNecessaryWithBPP = true;
if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.OutputFormat[k] == dm_n422
&& !mode_lib->vba.DSC422NativeSupport)
mode_lib->vba.DSC422NativeNotSupported = true;
if (((mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_hbr
|| mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_hbr2
|| mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_hbr3)
&& mode_lib->vba.Output[k] != dm_dp && mode_lib->vba.Output[k] != dm_edp)
|| ((mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_uhbr10
|| mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5
|| mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_uhbr20)
&& mode_lib->vba.Output[k] != dm_dp2p0))
mode_lib->vba.LinkRateDoesNotMatchDPVersion = true;
if (mode_lib->vba.OutputMultistreamEn[k] == true) {
if (mode_lib->vba.OutputMultistreamId[k] == k
&& mode_lib->vba.OutputLinkDPRate[k] == dm_dp_rate_na)
mode_lib->vba.LinkRateForMultistreamNotIndicated = true;
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
}
}
if (mode_lib->vba.Output[k] != dm_dp
&& (mode_lib->vba.ODMUse[k] == dm_odm_split_policy_1to2
|| mode_lib->vba.ODMUse[k] == dm_odm_mso_policy_1to2
|| mode_lib->vba.ODMUse[k] == dm_odm_mso_policy_1to4))
mode_lib->vba.MSOOrODMSplitWithNonDPLink = true;
if ((mode_lib->vba.ODMUse[k] == dm_odm_mso_policy_1to2
&& mode_lib->vba.OutputLinkDPLanes[k] < 2)
|| (mode_lib->vba.ODMUse[k] == dm_odm_mso_policy_1to4
&& mode_lib->vba.OutputLinkDPLanes[k] < 4))
mode_lib->vba.NotEnoughLanesForMSO = true;
}
}
for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
&& dml32_RequiredDTBCLK(mode_lib->vba.RequiresDSC[i][k],
mode_lib->vba.PixelClockBackEnd[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.OutputBppPerState[i][k],
mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.HTotal[k],
mode_lib->vba.HActive[k], mode_lib->vba.AudioSampleRate[k],
mode_lib->vba.AudioSampleLayout[k])
> mode_lib->vba.DTBCLKPerState[i]) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = true;
}
}
}
for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;
mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
&& mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1
&& mode_lib->vba.Output[k] == dm_hdmi) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = false;
}
if (mode_lib->vba.BlendingAndTiming[k] == k
&& mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
&& (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_edp
|| mode_lib->vba.Output[k] == dm_hdmi)) {
mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = false;
}
}
}
for (i = start_state; i < v->soc.num_states; i++) {
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
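		/* Required DSCCLK is PixelClockBackEnd / 3 per engine (each DSC engine is
		 * budgeted for 3 pixels per DSCCLK cycle); 4:2:0 and native 4:2:2 halve the
		 * requirement via DSCFormatFactor = 2, and ODM 2:1/4:1 split the stream over
		 * 2/4 engines, giving the /6 and /12 divisors below. The result is compared
		 * against MaxDSCCLK derated by the DSCCLK down-spread percentage.
		 */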
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
|| mode_lib->vba.Output[k] == dm_edp) {
if (mode_lib->vba.OutputFormat[k] == dm_420) {
mode_lib->vba.DSCFormatFactor = 2;
} else if (mode_lib->vba.OutputFormat[k] == dm_444) {
mode_lib->vba.DSCFormatFactor = 1;
} else if (mode_lib->vba.OutputFormat[k] == dm_n422) {
mode_lib->vba.DSCFormatFactor = 2;
} else {
mode_lib->vba.DSCFormatFactor = 1;
}
if (mode_lib->vba.RequiresDSC[i][k] == true) {
if (mode_lib->vba.ODMCombineEnablePerState[i][k]
== dm_odm_combine_mode_4to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 12.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i])
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = true;
} else if (mode_lib->vba.ODMCombineEnablePerState[i][k]
== dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i])
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = true;
} else {
if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i])
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = true;
}
}
}
}
}
}
/* Check DSC Unit and Slices Support */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.NotEnoughDSCUnits[i] = false;
mode_lib->vba.NotEnoughDSCSlices[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
mode_lib->vba.PixelsPerLinePerDSCUnitSupport[i] = true;
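		/* ODM 4:1 consumes 4 DSC units and allows up to 16 slices (4 per unit) with
		 * HActive up to 4x the per-unit pixels-per-line limit; ODM 2:1 uses 2 units
		 * and 8 slices; no ODM uses 1 unit and 4 slices.
		 */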
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.RequiresDSC[i][k] == true) {
if (mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
if (mode_lib->vba.HActive[k]
> 4 * mode_lib->vba.MaximumPixelsPerLinePerDSCUnit)
mode_lib->vba.PixelsPerLinePerDSCUnitSupport[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired + 4;
if (mode_lib->vba.NumberOfDSCSlices[k] > 16)
mode_lib->vba.NotEnoughDSCSlices[i] = true;
} else if (mode_lib->vba.ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.HActive[k]
> 2 * mode_lib->vba.MaximumPixelsPerLinePerDSCUnit)
mode_lib->vba.PixelsPerLinePerDSCUnitSupport[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired + 2;
if (mode_lib->vba.NumberOfDSCSlices[k] > 8)
mode_lib->vba.NotEnoughDSCSlices[i] = true;
} else {
if (mode_lib->vba.HActive[k] > mode_lib->vba.MaximumPixelsPerLinePerDSCUnit)
mode_lib->vba.PixelsPerLinePerDSCUnitSupport[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired + 1;
if (mode_lib->vba.NumberOfDSCSlices[k] > 4)
mode_lib->vba.NotEnoughDSCSlices[i] = true;
}
}
}
if (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC)
mode_lib->vba.NotEnoughDSCUnits[i] = true;
}
/*DSC Delay per state*/
for (i = start_state; i < v->soc.num_states; ++i) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(
mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k],
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.OutputBppPerState[i][k], mode_lib->vba.HActive[k],
mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k],
mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k],
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k],
mode_lib->vba.ip.dsc_delay_factor_wa);
}
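		/* Surfaces blended into an OTG master that requires DSC inherit the master's
		 * DSC delay.
		 */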
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
if (mode_lib->vba.BlendingAndTiming[k] == m &&
mode_lib->vba.RequiresDSC[i][m] == true) {
mode_lib->vba.DSCDelayPerState[i][k] =
mode_lib->vba.DSCDelayPerState[i][m];
}
}
}
}
}
	//Calculate Swath, DET Configuration, DCFCLKDeepSleep
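	/* For each state i and each of the two DPP-count options j, recompute swath
	 * width/height and the DET split, then save the per-state copies used by the
	 * bandwidth and prefetch checks below.
	 */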
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
mode_lib->vba.NoOfDPPThisState[k] = mode_lib->vba.NoOfDPP[i][j][k];
mode_lib->vba.ODMCombineEnableThisState[k] =
mode_lib->vba.ODMCombineEnablePerState[i][k];
}
dml32_CalculateSwathAndDETConfiguration(
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
mode_lib->vba.MaxTotalDETInKByte,
mode_lib->vba.MinCompressedBufferSizeInKByte,
false, /* ForceSingleDPP */
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.nomDETInKByte,
mode_lib->vba.UseUnboundedRequesting,
mode_lib->vba.DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment,
mode_lib->vba.ip.pixel_chunk_size_kbytes,
mode_lib->vba.ip.rob_buffer_size_kbytes,
mode_lib->vba.CompressedBufferSegmentSizeInkByteFinal,
mode_lib->vba.Output,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.MaximumSwathWidthLuma,
mode_lib->vba.MaximumSwathWidthChroma,
mode_lib->vba.SourceRotation,
mode_lib->vba.ViewportStationary,
mode_lib->vba.SourcePixelFormat,
mode_lib->vba.SurfaceTiling,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
mode_lib->vba.Read256BlockWidthY,
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.ODMCombineEnableThisState,
mode_lib->vba.BlendingAndTiming,
mode_lib->vba.BytePerPixelY,
mode_lib->vba.BytePerPixelC,
mode_lib->vba.BytePerPixelInDETY,
mode_lib->vba.BytePerPixelInDETC,
mode_lib->vba.HActive,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.NoOfDPPThisState,
/* Output */
mode_lib->vba.swath_width_luma_ub_this_state,
mode_lib->vba.swath_width_chroma_ub_this_state,
mode_lib->vba.SwathWidthYThisState,
mode_lib->vba.SwathWidthCThisState,
mode_lib->vba.SwathHeightYThisState,
mode_lib->vba.SwathHeightCThisState,
mode_lib->vba.DETBufferSizeInKByteThisState,
mode_lib->vba.DETBufferSizeYThisState,
mode_lib->vba.DETBufferSizeCThisState,
&mode_lib->vba.UnboundedRequestEnabledThisState,
&mode_lib->vba.CompressedBufferSizeInkByteThisState,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], /* Long CompBufReservedSpaceKBytes */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean[0], /* bool CompBufReservedSpaceNeedAdjustment */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean_array[0],
&mode_lib->vba.ViewportSizeSupport[i][j]);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.swath_width_luma_ub_all_states[i][j][k] =
mode_lib->vba.swath_width_luma_ub_this_state[k];
mode_lib->vba.swath_width_chroma_ub_all_states[i][j][k] =
mode_lib->vba.swath_width_chroma_ub_this_state[k];
mode_lib->vba.SwathWidthYAllStates[i][j][k] = mode_lib->vba.SwathWidthYThisState[k];
mode_lib->vba.SwathWidthCAllStates[i][j][k] = mode_lib->vba.SwathWidthCThisState[k];
mode_lib->vba.SwathHeightYAllStates[i][j][k] = mode_lib->vba.SwathHeightYThisState[k];
mode_lib->vba.SwathHeightCAllStates[i][j][k] = mode_lib->vba.SwathHeightCThisState[k];
mode_lib->vba.UnboundedRequestEnabledAllStates[i][j] =
mode_lib->vba.UnboundedRequestEnabledThisState;
mode_lib->vba.CompressedBufferSizeInkByteAllStates[i][j] =
mode_lib->vba.CompressedBufferSizeInkByteThisState;
mode_lib->vba.DETBufferSizeInKByteAllStates[i][j][k] =
mode_lib->vba.DETBufferSizeInKByteThisState[k];
mode_lib->vba.DETBufferSizeYAllStates[i][j][k] =
mode_lib->vba.DETBufferSizeYThisState[k];
mode_lib->vba.DETBufferSizeCAllStates[i][j][k] =
mode_lib->vba.DETBufferSizeCThisState[k];
}
}
}
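	/* Cursor read bandwidth (bytes/s): cursors * cursor width * BPP/8, fetched once
	 * per line time (HTotal / PixelClock) and scaled by VRatio.
	 */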
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0]
* mode_lib->vba.CursorBPP[k][0] / 8.0
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
}
dml32_CalculateSurfaceSizeInMall(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
mode_lib->vba.ViewportYStartY,
mode_lib->vba.ViewportXStartC,
mode_lib->vba.ViewportYStartC,
mode_lib->vba.ViewportWidth,
mode_lib->vba.ViewportHeight,
mode_lib->vba.BytePerPixelY,
mode_lib->vba.ViewportWidthChroma,
mode_lib->vba.ViewportHeightChroma,
mode_lib->vba.BytePerPixelC,
mode_lib->vba.SurfaceWidthY,
mode_lib->vba.SurfaceWidthC,
mode_lib->vba.SurfaceHeightY,
mode_lib->vba.SurfaceHeightC,
mode_lib->vba.Read256BlockWidthY,
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
mode_lib->vba.MacroTileWidthY,
mode_lib->vba.MacroTileWidthC,
mode_lib->vba.MacroTileHeightY,
mode_lib->vba.MacroTileHeightC,
mode_lib->vba.DCCMetaPitchY,
mode_lib->vba.DCCMetaPitchC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
&mode_lib->vba.ExceededMALLSize);
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.swath_width_luma_ub_this_state[k] =
mode_lib->vba.swath_width_luma_ub_all_states[i][j][k];
mode_lib->vba.swath_width_chroma_ub_this_state[k] =
mode_lib->vba.swath_width_chroma_ub_all_states[i][j][k];
mode_lib->vba.SwathWidthYThisState[k] = mode_lib->vba.SwathWidthYAllStates[i][j][k];
mode_lib->vba.SwathWidthCThisState[k] = mode_lib->vba.SwathWidthCAllStates[i][j][k];
mode_lib->vba.SwathHeightYThisState[k] = mode_lib->vba.SwathHeightYAllStates[i][j][k];
mode_lib->vba.SwathHeightCThisState[k] = mode_lib->vba.SwathHeightCAllStates[i][j][k];
mode_lib->vba.DETBufferSizeInKByteThisState[k] =
mode_lib->vba.DETBufferSizeInKByteAllStates[i][j][k];
mode_lib->vba.DETBufferSizeYThisState[k] =
mode_lib->vba.DETBufferSizeYAllStates[i][j][k];
mode_lib->vba.DETBufferSizeCThisState[k] =
mode_lib->vba.DETBufferSizeCAllStates[i][j][k];
mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
mode_lib->vba.NoOfDPPThisState[k] = mode_lib->vba.NoOfDPP[i][j][k];
}
mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] =
mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
+ mode_lib->vba.NoOfDPP[i][j][k];
}
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].PixelClock = mode_lib->vba.PixelClock[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DPPPerSurface = mode_lib->vba.NoOfDPP[i][j][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].SourceRotation = mode_lib->vba.SourceRotation[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportHeight = mode_lib->vba.ViewportHeight[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportHeightChroma = mode_lib->vba.ViewportHeightChroma[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesY = mode_lib->vba.Read256BlockWidthY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].SourcePixelFormat = mode_lib->vba.SourcePixelFormat[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].SurfaceTiling = mode_lib->vba.SurfaceTiling[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BytePerPixelY = mode_lib->vba.BytePerPixelY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BytePerPixelC = mode_lib->vba.BytePerPixelC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ProgressiveToInterlaceUnitInOPP =
mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].VRatio = mode_lib->vba.VRatio[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].VRatioChroma = mode_lib->vba.VRatioChroma[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].VTaps = mode_lib->vba.vtaps[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].VTapsChroma = mode_lib->vba.VTAPsChroma[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].PitchY = mode_lib->vba.PitchY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCMetaPitchY = mode_lib->vba.DCCMetaPitchY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].PitchC = mode_lib->vba.PitchC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCMetaPitchC = mode_lib->vba.DCCMetaPitchC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportStationary = mode_lib->vba.ViewportStationary[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportXStart = mode_lib->vba.ViewportXStartY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportYStart = mode_lib->vba.ViewportYStartY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportXStartC = mode_lib->vba.ViewportXStartC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].ViewportYStartC = mode_lib->vba.ViewportYStartC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].FORCE_ONE_ROW_FOR_FRAME = mode_lib->vba.ForceOneRowForFrame[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].SwathHeightY = mode_lib->vba.SwathHeightYThisState[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].SwathHeightC = mode_lib->vba.SwathHeightCThisState[k];
}
{
dml32_CalculateVMRowAndSwath(
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PTEBufferSizeInRequestsChroma,
mode_lib->vba.DCCMetaBufferSizeBytes,
mode_lib->vba.UseMALLForStaticScreen,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.SwathWidthYThisState,
mode_lib->vba.SwathWidthCThisState,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMMinPageSizeKBytes,
mode_lib->vba.HostVMMinPageSize,
/* Output */
mode_lib->vba.PTEBufferSizeNotExceededPerState,
mode_lib->vba.DCCMetaBufferSizeNotExceededPerState,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[0],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[1],
mode_lib->vba.dpte_row_height,
mode_lib->vba.dpte_row_height_chroma,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[2],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[3],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[4],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[5],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[6],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[7],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[8],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[9],
mode_lib->vba.meta_row_height,
mode_lib->vba.meta_row_height_chroma,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[10],
mode_lib->vba.dpte_group_bytes,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[11],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[12],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[13],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[14],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[15],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[16],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[17],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[18],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[19],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[20],
mode_lib->vba.PrefetchLinesYThisState,
mode_lib->vba.PrefetchLinesCThisState,
mode_lib->vba.PrefillY,
mode_lib->vba.PrefillC,
mode_lib->vba.MaxNumSwY,
mode_lib->vba.MaxNumSwC,
mode_lib->vba.meta_row_bandwidth_this_state,
mode_lib->vba.dpte_row_bandwidth_this_state,
mode_lib->vba.DPTEBytesPerRowThisState,
mode_lib->vba.PDEAndMetaPTEBytesPerFrameThisState,
mode_lib->vba.MetaRowBytesThisState,
mode_lib->vba.use_one_row_for_frame_this_state,
mode_lib->vba.use_one_row_for_frame_flip_this_state,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean_array[0], // Boolean UsesMALLForStaticScreen[]
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_boolean_array[1], // Boolean PTE_BUFFER_MODE[]
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer_array[21]); // Long BIGK_FRAGMENT_SIZE[]
}
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.PrefetchLinesY[i][j][k] = mode_lib->vba.PrefetchLinesYThisState[k];
mode_lib->vba.PrefetchLinesC[i][j][k] = mode_lib->vba.PrefetchLinesCThisState[k];
mode_lib->vba.meta_row_bandwidth[i][j][k] =
mode_lib->vba.meta_row_bandwidth_this_state[k];
mode_lib->vba.dpte_row_bandwidth[i][j][k] =
mode_lib->vba.dpte_row_bandwidth_this_state[k];
mode_lib->vba.DPTEBytesPerRow[i][j][k] = mode_lib->vba.DPTEBytesPerRowThisState[k];
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameThisState[k];
mode_lib->vba.MetaRowBytes[i][j][k] = mode_lib->vba.MetaRowBytesThisState[k];
mode_lib->vba.use_one_row_for_frame[i][j][k] =
mode_lib->vba.use_one_row_for_frame_this_state[k];
mode_lib->vba.use_one_row_for_frame_flip[i][j][k] =
mode_lib->vba.use_one_row_for_frame_flip_this_state[k];
}
mode_lib->vba.PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.PTEBufferSizeNotExceededPerState[k] == false)
mode_lib->vba.PTEBufferSizeNotExceeded[i][j] = false;
}
mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.DCCMetaBufferSizeNotExceededPerState[k] == false)
mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] = false;
}
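			/* Urgent latency for this state: the worst of the pixel-only, pixel+VM and
			 * VM-only latencies, plus an optional adjustment based on the state's
			 * fabric clock relative to the reference.
			 */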
mode_lib->vba.UrgLatency[i] = dml32_CalculateUrgentLatency(
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.UrgentLatencyPixelMixedWithVMData,
mode_lib->vba.UrgentLatencyVMDataOnly, mode_lib->vba.DoUrgentLatencyAdjustment,
mode_lib->vba.UrgentLatencyAdjustmentFabricClockComponent,
mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference,
mode_lib->vba.FabricClockPerState[i]);
//bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX];
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
dml32_CalculateUrgentBurstFactor(
mode_lib->vba.UsesMALLForPStateChange[k],
mode_lib->vba.swath_width_luma_ub_this_state[k],
mode_lib->vba.swath_width_chroma_ub_this_state[k],
mode_lib->vba.SwathHeightYThisState[k],
mode_lib->vba.SwathHeightCThisState[k],
(double) mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.UrgLatency[i],
mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0],
mode_lib->vba.CursorBPP[k][0],
mode_lib->vba.VRatio[k],
mode_lib->vba.VRatioChroma[k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.DETBufferSizeYThisState[k],
mode_lib->vba.DETBufferSizeCThisState[k],
/* Output */
&mode_lib->vba.UrgentBurstFactorCursor[k],
&mode_lib->vba.UrgentBurstFactorLuma[k],
&mode_lib->vba.UrgentBurstFactorChroma[k],
&mode_lib->vba.NoUrgentLatencyHiding[k]);
}
dml32_CalculateDCFCLKDeepSleep(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.BytePerPixelY,
mode_lib->vba.BytePerPixelC,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
mode_lib->vba.SwathWidthYThisState,
mode_lib->vba.SwathWidthCThisState,
mode_lib->vba.NoOfDPPThisState,
mode_lib->vba.HRatio,
mode_lib->vba.HRatioChroma,
mode_lib->vba.PixelClock,
mode_lib->vba.PSCL_FACTOR,
mode_lib->vba.PSCL_FACTOR_CHROMA,
mode_lib->vba.RequiredDPPCLKThisState,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.ReturnBusWidth,
/* Output */
&mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j]);
}
}
	//Calculate Writeback Delay, Max VStartup and Return BW
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackDelayTime[k] =
mode_lib->vba.WritebackLatency
+ dml32_CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.WritebackDestinationHeight[k],
mode_lib->vba.WritebackSourceHeight[k],
mode_lib->vba.HTotal[k])
/ mode_lib->vba.RequiredDISPCLK[i][j];
} else {
mode_lib->vba.WritebackDelayTime[k] = 0.0;
}
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[m]
== k && mode_lib->vba.WritebackEnable[m] == true) {
mode_lib->vba.WritebackDelayTime[k] =
dml_max(mode_lib->vba.WritebackDelayTime[k],
mode_lib->vba.WritebackLatency
+ dml32_CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[m],
mode_lib->vba.WritebackHRatio[m],
mode_lib->vba.WritebackVRatio[m],
mode_lib->vba.WritebackVTaps[m],
mode_lib->vba.WritebackDestinationWidth[m],
mode_lib->vba.WritebackDestinationHeight[m],
mode_lib->vba.WritebackSourceHeight[m],
mode_lib->vba.HTotal[m]) /
mode_lib->vba.RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[k] == m) {
mode_lib->vba.WritebackDelayTime[k] =
mode_lib->vba.WritebackDelayTime[m];
}
}
}
mode_lib->vba.MaxMaxVStartup[i][j] = 0;
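			/* Max VSTARTUP = vblank lines (halved for interlace when the P2I unit is
			 * not in the OPP) minus the lines needed to absorb the writeback delay;
			 * clamped to the OTG register limit below and tracked as a per-state
			 * maximum.
			 */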
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.MaximumVStartup[i][j][k] = ((mode_lib->vba.Interlace[k] &&
!mode_lib->vba.ProgressiveToInterlaceUnitInOPP) ?
dml_floor((mode_lib->vba.VTotal[k] -
mode_lib->vba.VActive[k]) / 2.0, 1.0) :
mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k])
- dml_max(1.0, dml_ceil(1.0 *
mode_lib->vba.WritebackDelayTime[k] /
(mode_lib->vba.HTotal[k] /
mode_lib->vba.PixelClock[k]), 1.0));
// Clamp to max OTG vstartup register limit
if (mode_lib->vba.MaximumVStartup[i][j][k] > 1023)
mode_lib->vba.MaximumVStartup[i][j][k] = 1023;
mode_lib->vba.MaxMaxVStartup[i][j] = dml_max(mode_lib->vba.MaxMaxVStartup[i][j],
mode_lib->vba.MaximumVStartup[i][j][k]);
}
}
}
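	/* Worst-case out-of-order return allowance: number of memory channels times the
	 * largest per-channel reordering requirement of the three traffic classes.
	 */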
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ReorderingBytes = mode_lib->vba.NumberOfChannels
* dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
dml32_CalculateMinAndMaxPrefetchMode(mode_lib->vba.AllowForPStateChangeOrStutterInVBlankFinal,
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j)
mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];
}
/* Immediate Flip and MALL parameters */
mode_lib->vba.ImmediateFlipRequiredFinal = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.ImmediateFlipRequiredFinal = mode_lib->vba.ImmediateFlipRequiredFinal
|| (mode_lib->vba.ImmediateFlipRequirement[k] == dm_immediate_flip_required);
}
mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified =
mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified
|| ((mode_lib->vba.ImmediateFlipRequirement[k]
!= dm_immediate_flip_required)
&& (mode_lib->vba.ImmediateFlipRequirement[k]
!= dm_immediate_flip_not_required));
}
mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified =
mode_lib->vba.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified
&& mode_lib->vba.ImmediateFlipRequiredFinal;
mode_lib->vba.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe =
mode_lib->vba.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe ||
((mode_lib->vba.HostVMEnable == true || mode_lib->vba.ImmediateFlipRequirement[k] !=
dm_immediate_flip_not_required) &&
(mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame ||
mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe));
}
mode_lib->vba.InvalidCombinationOfMALLUseForPStateAndStaticScreen = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.InvalidCombinationOfMALLUseForPStateAndStaticScreen =
mode_lib->vba.InvalidCombinationOfMALLUseForPStateAndStaticScreen
|| ((mode_lib->vba.UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable
|| mode_lib->vba.UseMALLForStaticScreen[k] == dm_use_mall_static_screen_optimize)
&& (mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe))
|| ((mode_lib->vba.UseMALLForStaticScreen[k] == dm_use_mall_static_screen_disable
|| mode_lib->vba.UseMALLForStaticScreen[k] == dm_use_mall_static_screen_optimize)
&& (mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame));
}
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.FullFrameMALLPStateMethod = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SubViewportMALLPStateMethod = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.PhantomPipeMALLPStateMethod = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.FullFrameMALLPStateMethod = true;
if (mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SubViewportMALLPStateMethod = true;
if (mode_lib->vba.UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.PhantomPipeMALLPStateMethod = true;
}
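	/* Sub-viewport MALL p-state requires matching phantom pipes (both methods present
	 * or neither) and cannot be combined with full-frame MALL.
	 */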
mode_lib->vba.InvalidCombinationOfMALLUseForPState = (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SubViewportMALLPStateMethod
!= v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.PhantomPipeMALLPStateMethod) || (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SubViewportMALLPStateMethod && v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.FullFrameMALLPStateMethod);
if (mode_lib->vba.UseMinimumRequiredDCFCLK == true) {
dml32_UseMinimumDCFCLK(
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DRRDisplay,
mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
mode_lib->vba.MaxInterDCNTileRepeaters,
mode_lib->vba.MaxPrefetchMode,
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.FCLKChangeLatency,
mode_lib->vba.SREnterPlusExitTime,
mode_lib->vba.ReturnBusWidth,
mode_lib->vba.RoundTripPingLatencyCycles,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ReorderingBytes,
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.HostVMMinPageSize,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
mode_lib->vba.DynamicMetadataVMEnabled,
mode_lib->vba.ImmediateFlipRequiredFinal,
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
mode_lib->vba.PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
mode_lib->vba.VTotal,
mode_lib->vba.VActive,
mode_lib->vba.DynamicMetadataTransmittedBytes,
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired,
mode_lib->vba.Interlace,
mode_lib->vba.RequiredDPPCLK,
mode_lib->vba.RequiredDISPCLK,
mode_lib->vba.UrgLatency,
mode_lib->vba.NoOfDPP,
mode_lib->vba.ProjectedDCFCLKDeepSleep,
mode_lib->vba.MaximumVStartup,
mode_lib->vba.TotalNumberOfActiveDPP,
mode_lib->vba.TotalNumberOfDCCActiveDPP,
mode_lib->vba.dpte_group_bytes,
mode_lib->vba.PrefetchLinesY,
mode_lib->vba.PrefetchLinesC,
mode_lib->vba.swath_width_luma_ub_all_states,
mode_lib->vba.swath_width_chroma_ub_all_states,
mode_lib->vba.BytePerPixelY,
mode_lib->vba.BytePerPixelC,
mode_lib->vba.HTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.PDEAndMetaPTEBytesPerFrame,
mode_lib->vba.DPTEBytesPerRow,
mode_lib->vba.MetaRowBytes,
mode_lib->vba.DynamicMetadataEnable,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.DCFCLKPerState,
/* Output */
mode_lib->vba.DCFCLKState);
} // UseMinimumRequiredDCFCLK == true
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,
mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j],
mode_lib->vba.FabricClockPerState[i], mode_lib->vba.DRAMSpeedPerState[i]);
}
}
//Re-ordering Buffer Support Check
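	/* The ROB passes if the ROB (minus one pixel chunk) buffers more return time than
	 * the round trip (RoundTripPingLatencyCycles + 32 DCFCLK cycles) plus the time to
	 * drain the reordering bytes at the return rate.
	 */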
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024
/ mode_lib->vba.ReturnBWPerState[i][j]
> (mode_lib->vba.RoundTripPingLatencyCycles + 32)
/ mode_lib->vba.DCFCLKState[i][j]
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ReorderingBytes / mode_lib->vba.ReturnBWPerState[i][j]) {
mode_lib->vba.ROBSupport[i][j] = true;
} else {
mode_lib->vba.ROBSupport[i][j] = false;
}
}
}
//Vertical Active BW support check
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaxTotalVActiveRDBandwidth += mode_lib->vba.ReadBandwidthLuma[k]
+ mode_lib->vba.ReadBandwidthChroma[k];
}
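	/* Available vertical-active bandwidth is the minimum of the derated SDP
	 * (ReturnBusWidth * DCFCLK), fabric (FabricClock * FabricDatapathToDCNDataReturn)
	 * and DRAM (speed * channels * channel width) bandwidths; the two lowest states
	 * use the STROBE derating.
	 */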
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] =
dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j]
* mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn
* mode_lib->vba.MaxAveragePercentOfIdealFabricBWDisplayCanUseInNormalSystemOperation / 100,
mode_lib->vba.DRAMSpeedPerState[i]
* mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth
* (i < 2 ? mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperationSTROBE : mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation) / 100);
if (v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MaxTotalVActiveRDBandwidth
<= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j]) {
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][j] = true;
} else {
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][j] = false;
}
}
}
/* Prefetch Check */
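	/* For each state and DPP option, search for a workable prefetch schedule: start at
	 * MinPrefetchMode with the largest VStartup, shrink MaxVStartup while oversized
	 * VM/row fetches remain, otherwise advance to the next prefetch mode, until
	 * prefetch, dynamic metadata, VRatio and (when required) immediate flip all pass
	 * or MaxPrefetchMode is exhausted.
	 */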
for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.NoOfDPPThisState[k] = mode_lib->vba.NoOfDPP[i][j][k];
mode_lib->vba.swath_width_luma_ub_this_state[k] =
mode_lib->vba.swath_width_luma_ub_all_states[i][j][k];
mode_lib->vba.swath_width_chroma_ub_this_state[k] =
mode_lib->vba.swath_width_chroma_ub_all_states[i][j][k];
mode_lib->vba.SwathWidthYThisState[k] = mode_lib->vba.SwathWidthYAllStates[i][j][k];
mode_lib->vba.SwathWidthCThisState[k] = mode_lib->vba.SwathWidthCAllStates[i][j][k];
mode_lib->vba.SwathHeightYThisState[k] = mode_lib->vba.SwathHeightYAllStates[i][j][k];
mode_lib->vba.SwathHeightCThisState[k] = mode_lib->vba.SwathHeightCAllStates[i][j][k];
mode_lib->vba.UnboundedRequestEnabledThisState =
mode_lib->vba.UnboundedRequestEnabledAllStates[i][j];
mode_lib->vba.CompressedBufferSizeInkByteThisState =
mode_lib->vba.CompressedBufferSizeInkByteAllStates[i][j];
mode_lib->vba.DETBufferSizeInKByteThisState[k] =
mode_lib->vba.DETBufferSizeInKByteAllStates[i][j][k];
mode_lib->vba.DETBufferSizeYThisState[k] =
mode_lib->vba.DETBufferSizeYAllStates[i][j][k];
mode_lib->vba.DETBufferSizeCThisState[k] =
mode_lib->vba.DETBufferSizeCAllStates[i][j][k];
}
mode_lib->vba.VActiveBandwithSupport[i][j] = dml32_CalculateVActiveBandwithSupport(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBWPerState[i][j],
mode_lib->vba.NoUrgentLatencyHiding,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.cursor_bw,
mode_lib->vba.meta_row_bandwidth_this_state,
mode_lib->vba.dpte_row_bandwidth_this_state,
mode_lib->vba.NoOfDPPThisState,
mode_lib->vba.UrgentBurstFactorLuma,
mode_lib->vba.UrgentBurstFactorChroma,
mode_lib->vba.UrgentBurstFactorCursor);
mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] = dml32_CalculateDETSwathFillLatencyHiding(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBWPerState[i][j],
mode_lib->vba.UrgLatency[i],
mode_lib->vba.SwathHeightYThisState,
mode_lib->vba.SwathHeightCThisState,
mode_lib->vba.swath_width_luma_ub_this_state,
mode_lib->vba.swath_width_chroma_ub_this_state,
mode_lib->vba.BytePerPixelInDETY,
mode_lib->vba.BytePerPixelInDETC,
mode_lib->vba.DETBufferSizeYThisState,
mode_lib->vba.DETBufferSizeCThisState,
mode_lib->vba.NoOfDPPThisState,
mode_lib->vba.HTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.UseUnboundedRequesting);
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,
mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],
mode_lib->vba.DRAMSpeedPerState[i]);
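			/* With GPUVM and HostVM both enabled, page-table requests only get the
			 * VM-only return bandwidth; the inefficiency factor (total return BW /
			 * VM-only return BW) is used below to scale the host-VM fetch cost.
			 */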
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor = 1;
if (mode_lib->vba.GPUVMEnable && mode_lib->vba.HostVMEnable)
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor = mode_lib->vba.ReturnBWPerState[i][j]
/ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState;
mode_lib->vba.ExtraLatency = dml32_CalculateExtraLatency(
mode_lib->vba.RoundTripPingLatencyCycles, v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.ReorderingBytes,
mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.TotalNumberOfActiveDPP[i][j],
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j], mode_lib->vba.MetaChunkSize,
mode_lib->vba.ReturnBWPerState[i][j], mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable, mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.NoOfDPPThisState, mode_lib->vba.dpte_group_bytes,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor, mode_lib->vba.HostVMMinPageSize,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels);
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NextPrefetchModeState = mode_lib->vba.MinPrefetchMode;
mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[i][j];
do {
mode_lib->vba.PrefetchModePerState[i][j] = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NextPrefetchModeState;
mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.TWait = dml32_CalculateTWait(
mode_lib->vba.PrefetchModePerState[i][j],
mode_lib->vba.UsesMALLForPStateChange[k],
mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
mode_lib->vba.DRRDisplay[k],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i],
mode_lib->vba.SREnterPlusExitTime);
memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe));
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.DCFClkDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.DPPPerSurface = mode_lib->vba.NoOfDPP[i][j][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.SourceRotation = mode_lib->vba.SourceRotation[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BlockWidth256BytesY = mode_lib->vba.Read256BlockWidthY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.HActive = mode_lib->vba.HActive[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.DCCEnable = mode_lib->vba.DCCEnable[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.ODMMode = mode_lib->vba.ODMCombineEnablePerState[i][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.SourcePixelFormat = mode_lib->vba.SourcePixelFormat[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BytePerPixelY = mode_lib->vba.BytePerPixelY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.BytePerPixelC = mode_lib->vba.BytePerPixelC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.ProgressiveToInterlaceUnitInOPP =
mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
v,
k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
v->DSCDelayPerState[i][k],
v->SwathWidthYThisState[k] / v->HRatio[k],
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->PrefetchLinesY[i][j][k],
v->SwathWidthYThisState[k],
v->PrefillY[k],
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
&v->LinesForMetaAndDPTERow[k],
&v->VRatioPreY[i][j][k],
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[0][0][k],
&v->RequiredPrefetchPixelDataBWChroma[0][0][k],
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // unsigned int *VUpdateOffsetPix
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[3], // unsigned int *VUpdateWidthPix
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[4]); // unsigned int *VReadyOffsetPix
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
dml32_CalculateUrgentBurstFactor(
mode_lib->vba.UsesMALLForPStateChange[k],
mode_lib->vba.swath_width_luma_ub_this_state[k],
mode_lib->vba.swath_width_chroma_ub_this_state[k],
mode_lib->vba.SwathHeightYThisState[k],
mode_lib->vba.SwathHeightCThisState[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.UrgLatency[i], mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0], mode_lib->vba.CursorBPP[k][0],
mode_lib->vba.VRatioPreY[i][j][k],
mode_lib->vba.VRatioPreC[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.DETBufferSizeYThisState[k],
mode_lib->vba.DETBufferSizeCThisState[k],
/* Output */
&mode_lib->vba.UrgentBurstFactorCursorPre[k],
&mode_lib->vba.UrgentBurstFactorLumaPre[k],
&mode_lib->vba.UrgentBurstFactorChromaPre[k],
&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
}
{
dml32_CalculatePrefetchBandwithSupport(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBWPerState[i][j],
mode_lib->vba.NotUrgentLatencyHidingPre,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0],
mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0],
mode_lib->vba.cursor_bw,
mode_lib->vba.meta_row_bandwidth_this_state,
mode_lib->vba.dpte_row_bandwidth_this_state,
mode_lib->vba.cursor_bw_pre,
mode_lib->vba.prefetch_vmrow_bw,
mode_lib->vba.NoOfDPPThisState,
mode_lib->vba.UrgentBurstFactorLuma,
mode_lib->vba.UrgentBurstFactorChroma,
mode_lib->vba.UrgentBurstFactorCursor,
mode_lib->vba.UrgentBurstFactorLumaPre,
mode_lib->vba.UrgentBurstFactorChromaPre,
mode_lib->vba.UrgentBurstFactorCursorPre,
v->PrefetchBW,
v->VRatio,
v->MaxVRatioPre,
/* output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // Single *PrefetchBandwidth
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // Single *FractionOfUrgentBandwidth
&mode_lib->vba.PrefetchSupported[i][j]);
}
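				/* Prefetch also fails if any surface gets fewer than 2 line times, needs
				 * 32 or more lines for the meta/PTE, needs 16 or more lines for the
				 * meta + DPTE row, or could not fit a schedule at all.
				 */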
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.LineTimesForPrefetch[k]
< 2.0 || mode_lib->vba.LinesForMetaPTE[k] >= 32.0
|| mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
|| mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) {
mode_lib->vba.PrefetchSupported[i][j] = false;
}
}
mode_lib->vba.DynamicMetadataSupported[i][j] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.NoTimeForDynamicMetadata[i][j][k] == true)
mode_lib->vba.DynamicMetadataSupported[i][j] = false;
}
mode_lib->vba.VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.VRatioPreY[i][j][k] > mode_lib->vba.MaxVRatioPre
|| mode_lib->vba.VRatioPreC[i][j][k] > mode_lib->vba.MaxVRatioPre
|| mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) {
mode_lib->vba.VRatioInPrefetchSupported[i][j] = false;
}
}
mode_lib->vba.AnyLinesForVMOrRowTooLarge = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16
|| mode_lib->vba.LinesForMetaPTE[k] >= 32) {
mode_lib->vba.AnyLinesForVMOrRowTooLarge = true;
}
}
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
dml32_CalculateBandwidthAvailableForImmediateFlip(
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBWPerState[i][j],
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0],
mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0],
mode_lib->vba.cursor_bw,
mode_lib->vba.cursor_bw_pre,
mode_lib->vba.NoOfDPPThisState,
mode_lib->vba.UrgentBurstFactorLuma,
mode_lib->vba.UrgentBurstFactorChroma,
mode_lib->vba.UrgentBurstFactorCursor,
mode_lib->vba.UrgentBurstFactorLumaPre,
mode_lib->vba.UrgentBurstFactorChromaPre,
mode_lib->vba.UrgentBurstFactorCursorPre);
mode_lib->vba.TotImmediateFlipBytes = 0.0;
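					/* Bytes to fetch for an immediate flip: per-DPP PDE + meta-PTE bytes
					 * plus meta row bytes, plus the DPTE row bytes (doubled when the
					 * one-row-for-frame flip mode is used).
					 */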
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.ImmediateFlipRequirement[k] ==
dm_immediate_flip_not_required)) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ mode_lib->vba.NoOfDPP[i][j][k]
* mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k]
+ mode_lib->vba.MetaRowBytes[i][j][k];
if (mode_lib->vba.use_one_row_for_frame_flip[i][j][k]) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes + 2
* mode_lib->vba.DPTEBytesPerRow[i][j][k];
} else {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ mode_lib->vba.DPTEBytesPerRow[i][j][k];
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
dml32_CalculateFlipSchedule(v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
mode_lib->vba.ExtraLatency,
mode_lib->vba.UrgLatency[i],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMMinPageSize,
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
mode_lib->vba.MetaRowBytes[i][j][k],
mode_lib->vba.DPTEBytesPerRow[i][j][k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
(mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]),
mode_lib->vba.VRatio[k],
mode_lib->vba.VRatioChroma[k],
mode_lib->vba.Tno_bw[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
mode_lib->vba.dpte_row_height_chroma[k],
mode_lib->vba.meta_row_height_chroma[k],
mode_lib->vba.use_one_row_for_frame_flip[i][j][k], // 24
/* Output */
&mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
&mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
&mode_lib->vba.final_flip_bw[k],
&mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
}
{
dml32_CalculateImmediateFlipBandwithSupport(mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.ReturnBWPerState[i][j],
mode_lib->vba.ImmediateFlipRequirement,
mode_lib->vba.final_flip_bw,
mode_lib->vba.ReadBandwidthLuma,
mode_lib->vba.ReadBandwidthChroma,
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0],
mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0],
mode_lib->vba.cursor_bw,
mode_lib->vba.meta_row_bandwidth_this_state,
mode_lib->vba.dpte_row_bandwidth_this_state,
mode_lib->vba.cursor_bw_pre,
mode_lib->vba.prefetch_vmrow_bw,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.UrgentBurstFactorLuma,
mode_lib->vba.UrgentBurstFactorChroma,
mode_lib->vba.UrgentBurstFactorCursor,
mode_lib->vba.UrgentBurstFactorLumaPre,
mode_lib->vba.UrgentBurstFactorChromaPre,
mode_lib->vba.UrgentBurstFactorCursorPre,
/* output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // Single *TotalBandwidth
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // Single *FractionOfUrgentBandwidth
&mode_lib->vba.ImmediateFlipSupportedForState[i][j]); // Boolean *ImmediateFlipBandwidthSupport
}
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.ImmediateFlipRequirement[k]
== dm_immediate_flip_not_required)
&& (mode_lib->vba.ImmediateFlipSupportedForPipe[k]
== false))
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
			} else { // if prefetch is not supported, assume iflip is not supported
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
if (mode_lib->vba.MaxVStartup <= __DML_VBA_MIN_VSTARTUP__
|| mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[i][j];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NextPrefetchModeState = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NextPrefetchModeState + 1;
} else {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
}
} while (!((mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.DynamicMetadataSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true &&
					// consider flip support okay when there is no hostvm and the
					// user doesn't require an iflip, OR when the flip bw is ok.
					// If there is hostvm, DCN needs to support iflip for invalidation.
((mode_lib->vba.HostVMEnable == false
&& !mode_lib->vba.ImmediateFlipRequiredFinal)
|| mode_lib->vba.ImmediateFlipSupportedForState[i][j] == true))
|| (mode_lib->vba.NextMaxVStartup == mode_lib->vba.MaxMaxVStartup[i][j]
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.NextPrefetchModeState > mode_lib->vba.MaxPrefetchMode)));
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.use_one_row_for_frame_this_state[k] =
mode_lib->vba.use_one_row_for_frame[i][j][k];
}
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.UrgentLatency = mode_lib->vba.UrgLatency[i];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.ExtraLatency = mode_lib->vba.ExtraLatency;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.WritebackLatency = mode_lib->vba.WritebackLatency;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.DRAMClockChangeLatency = mode_lib->vba.DRAMClockChangeLatency;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.FCLKChangeLatency = mode_lib->vba.FCLKChangeLatency;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.SRExitTime = mode_lib->vba.SRExitTime;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.SREnterPlusExitTime = mode_lib->vba.SREnterPlusExitTime;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.SRExitZ8Time = mode_lib->vba.SRExitZ8Time;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.SREnterPlusExitZ8Time = mode_lib->vba.SREnterPlusExitZ8Time;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.USRRetrainingLatency = mode_lib->vba.USRRetrainingLatency;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
v,
v->PrefetchModePerState[i][j],
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
v->SOCCLKPerState[i],
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->SwathWidthYThisState, // 24
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
v->UnboundedRequestEnabledThisState,
v->CompressedBufferSizeInkByteThisState,
/* Output */
&v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
&v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
mode_lib->vba.ActiveDRAMClockChangeLatencyMarginPerState[i][j]);
}
}
} // End of Prefetch Check
/*Cursor Support Check*/
mode_lib->vba.CursorSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
if (mode_lib->vba.CursorBPP[k][0] == 64 && mode_lib->vba.Cursor64BppSupport == false)
mode_lib->vba.CursorSupport = false;
}
}
/*Valid Pitch Check*/
mode_lib->vba.PitchSupport = true;
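/* Pitches are checked against the required alignment: max(pitch, surface
 * width) rounded up to the macro tile width for Y/C, and to 64 * the 256B
 * block width for the DCC meta pitch. If the aligned value exceeds the
 * provided pitch, the mode is rejected.
 */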
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.AlignedYPitch[k] = dml_ceil(
dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.SurfaceWidthY[k]),
mode_lib->vba.MacroTileWidthY[k]);
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.AlignedDCCMetaPitchY[k] = dml_ceil(
dml_max(mode_lib->vba.DCCMetaPitchY[k], mode_lib->vba.SurfaceWidthY[k]),
64.0 * mode_lib->vba.Read256BlockWidthY[k]);
} else {
mode_lib->vba.AlignedDCCMetaPitchY[k] = mode_lib->vba.DCCMetaPitchY[k];
}
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
mode_lib->vba.AlignedCPitch[k] = dml_ceil(
dml_max(mode_lib->vba.PitchC[k], mode_lib->vba.SurfaceWidthC[k]),
mode_lib->vba.MacroTileWidthC[k]);
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.AlignedDCCMetaPitchC[k] = dml_ceil(
dml_max(mode_lib->vba.DCCMetaPitchC[k],
mode_lib->vba.SurfaceWidthC[k]),
64.0 * mode_lib->vba.Read256BlockWidthC[k]);
} else {
mode_lib->vba.AlignedDCCMetaPitchC[k] = mode_lib->vba.DCCMetaPitchC[k];
}
} else {
mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
mode_lib->vba.AlignedDCCMetaPitchC[k] = mode_lib->vba.DCCMetaPitchC[k];
}
if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]
|| mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]
|| mode_lib->vba.AlignedDCCMetaPitchY[k] > mode_lib->vba.DCCMetaPitchY[k]
|| mode_lib->vba.AlignedDCCMetaPitchC[k] > mode_lib->vba.DCCMetaPitchC[k]) {
mode_lib->vba.PitchSupport = false;
}
}
mode_lib->vba.ViewportExceedsSurface = false;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.ViewportWidth[k] > mode_lib->vba.SurfaceWidthY[k]
|| mode_lib->vba.ViewportHeight[k] > mode_lib->vba.SurfaceHeightY[k]) {
mode_lib->vba.ViewportExceedsSurface = true;
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe) {
if (mode_lib->vba.ViewportWidthChroma[k] > mode_lib->vba.SurfaceWidthC[k]
|| mode_lib->vba.ViewportHeightChroma[k]
> mode_lib->vba.SurfaceHeightC[k]) {
mode_lib->vba.ViewportExceedsSurface = true;
}
}
}
}
/*Mode Support, Voltage State and SOC Configuration*/
mode_support_configuration(v, mode_lib);
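/* Walk the states from highest to lowest; the last (lowest) state where
 * either the non-MPC-combine [0] or MPC-combine [1] variant passes becomes
 * VoltageLevel, and MaximumMPCCombine records which variant is required.
 */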
MaximumMPCCombine = 0;
for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true ||
mode_lib->vba.ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
else
MaximumMPCCombine = 1;
}
}
mode_lib->vba.ImmediateFlipSupport =
mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.UnboundedRequestEnabled =
mode_lib->vba.UnboundedRequestEnabledAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.CompressedBufferSizeInkByte =
mode_lib->vba.CompressedBufferSizeInkByteAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine]; // Not used, informational
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.MPCCombineEnable[k] =
mode_lib->vba.MPCCombine[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.SwathHeightY[k] =
mode_lib->vba.SwathHeightYAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.SwathHeightC[k] =
mode_lib->vba.SwathHeightCAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.DETBufferSizeInKByte[k] =
mode_lib->vba.DETBufferSizeInKByteAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.DETBufferSizeY[k] =
mode_lib->vba.DETBufferSizeYAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.DETBufferSizeC[k] =
mode_lib->vba.DETBufferSizeCAllStates[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
mode_lib->vba.OutputType[k] = mode_lib->vba.OutputTypePerState[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.OutputRate[k] = mode_lib->vba.OutputRatePerState[mode_lib->vba.VoltageLevel][k];
}
mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.DISPCLK = mode_lib->vba.RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.maxMpcComb = MaximumMPCCombine;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
}
mode_lib->vba.DSCEnabled[k] = mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.FECEnable[k] = mode_lib->vba.RequiresFEC[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.OutputBpp[k] = mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
}
mode_lib->vba.UrgentWatermark = mode_lib->vba.Watermark.UrgentWatermark;
mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.Watermark.StutterEnterPlusExitWatermark;
mode_lib->vba.StutterExitWatermark = mode_lib->vba.Watermark.StutterExitWatermark;
mode_lib->vba.WritebackDRAMClockChangeWatermark = mode_lib->vba.Watermark.WritebackDRAMClockChangeWatermark;
mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.Watermark.DRAMClockChangeWatermark;
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgLatency[mode_lib->vba.VoltageLevel];
mode_lib->vba.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
/* VBA has Error type to Error Msg output here, but it is not necessary for DML-C */
} // ModeSupportAndSystemConfigurationFull
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn32_fpu.h"
#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "display_mode_vba_util_32.h"
#include "dml/dcn32/display_mode_vba_32.h"
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
static const struct subvp_high_refresh_list subvp_high_refresh_list = {
.min_refresh = 120,
.max_refresh = 175,
.res = {
{.width = 3840, .height = 2160, },
{.width = 3440, .height = 1440, },
{.width = 2560, .height = 1440, }},
};
struct _vcs_dpi_ip_params_st dcn3_2_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_enable = 0,
.rob_buffer_size_kbytes = 128,
.det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE,
.config_return_buffer_size_in_kbytes = 1280,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.alpha_pixel_chunk_size_kbytes = 4,
.min_pixel_chunk_size_bytes = 1024,
.dcc_meta_buffer_size_bytes = 6272,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 4,
.maximum_dsc_bits_per_component = 12,
.maximum_pixels_per_line_per_dsc_unit = 6016,
.dsc422_native_support = true,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 57,
.line_buffer_size_bits = 1171920,
.max_line_buffer_lines = 32,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 8,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 47,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 28,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 125,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
.max_num_dp2p0_outputs = 2,
.max_num_dp2p0_streams = 4,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 1564.0,
.fabricclk_mhz = 2500.0,
.dispclk_mhz = 2150.0,
.dppclk_mhz = 2150.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.phyclk_d32_mhz = 625.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 716.667,
.dram_speed_mts = 18000.0,
.dtbclk_mhz = 1564.0,
},
},
.num_states = 1,
.sr_exit_time_us = 42.97,
.sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
.round_trip_ping_latency_dcfclk_cycles = 263,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.fclk_change_latency_us = 25,
.usr_retraining_latency_us = 2,
.smn_latency_us = 2,
.mall_allocated_for_dcn_mbytes = 64,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
.max_avg_dram_bw_use_normal_percent = 15.0,
.num_chans = 24,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.5,
.dram_clock_change_latency_us = 400,
.dispclk_dppclk_vco_speed_mhz = 4300.0,
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
{
/* defaults */
double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us;
double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
/* For min clocks use as reported by PM FW and report those as min */
uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
uint16_t min_dcfclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
uint16_t setb_min_uclk_mhz = min_uclk_mhz;
uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
dc_assert_fp_enabled();
/* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
if (dcfclk_mhz_for_the_second_state)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
else
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz)
setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz;
/* Set A - Normal - default values */
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
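/* Dummy p-state table: DRAM speed for each UCLK DPM level is taken as
 * memclk_mhz * 16 MT/s, with progressively smaller dummy p-state latencies
 * for the higher levels.
 */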
clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16;
clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
}
/* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
/* For MALL, DRAM clock change latency is N/A; for watermark calculations use the lowest-value dummy P-state latency */
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
/*
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
* dm_allow_self_refresh_and_mclk_switch to happen is found
*/
int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
const int max_latency_table_entries = 4;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
int dummy_latency_index = 0;
enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
while (dummy_latency_index < max_latency_table_entries) {
if (temp_clock_change_support != dm_dram_clock_change_unsupported)
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
dcn32_subvp_in_use(dc, context))
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
break;
dummy_latency_index++;
}
if (dummy_latency_index == max_latency_table_entries) {
ASSERT(dummy_latency_index != max_latency_table_entries);
/* If the execution gets here, it means dummy p_states are
* not possible. This should never happen and would mean
* something is severely wrong.
* Here we reset dummy_latency_index to 3, because it is
* better to have underflows than system crashes.
*/
dummy_latency_index = max_latency_table_entries - 1;
}
return dummy_latency_index;
}
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
*
* This function must be called AFTER the phantom pipes are added to context
* and run through DML (so that the DLG params for the phantom pipes can be
* populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
uint32_t i, pipe_idx;
dc_assert_fp_enabled();
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
pipes[pipe_idx].pipe.dest.vstartup_start =
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset =
get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width =
get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset =
get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipe->pipe_dlg_param = pipes[pipe_idx].pipe.dest;
}
pipe_idx++;
}
}
/**
* dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
* @context: [in] New DC state to be programmed
* @pipe_e2e: [in] DML pipe end to end context
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
* determined by DPPClk requirements
*
* This function follows the same policy as DML:
* - Check for ODM combine requirements / policy first
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
* Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
{
double pscl_throughput;
double pscl_throughput_chroma;
double dpp_clk_single_dpp, clock;
double clk_frequency = 0.0;
double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
bool total_available_pipes_support = false;
uint32_t number_of_dpp = 0;
enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
double req_dispclk_per_surface = 0;
uint8_t num_splits = 0;
dc_assert_fp_enabled();
dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
pipe_e2e->pipe.dest.hactive,
pipe_e2e->dout.output_format,
pipe_e2e->dout.output_type,
pipe_e2e->pipe.dest.odm_combine_policy,
context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
pipe_e2e->dout.dsc_enable != 0,
0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
context->bw_ctx.dml.ip.max_num_dpp,
pipe_e2e->pipe.dest.pixel_rate_mhz,
context->bw_ctx.dml.soc.dcn_downspread_percent,
context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
pipe_e2e->dout.dsc_slices,
/* Output */
&total_available_pipes_support,
&number_of_dpp,
&odm_mode,
&req_dispclk_per_surface);
dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
pipe_e2e->pipe.dest.pixel_rate_mhz,
pipe_e2e->pipe.src.source_format,
pipe_e2e->pipe.scale_taps.htaps,
pipe_e2e->pipe.scale_taps.htaps_c,
pipe_e2e->pipe.scale_taps.vtaps,
pipe_e2e->pipe.scale_taps.vtaps_c,
/* Output */
&pscl_throughput, &pscl_throughput_chroma,
&dpp_clk_single_dpp);
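/* Apply the DCN downspread margin to the single-DPP clock requirement and
 * translate it to the frequency the PLL can produce (based on 4x the VCO
 * speed); if that exceeds the highest-state max DPPCLK, MPC split is
 * predicted below.
 */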
clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);
if (clock > 0)
clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);
if (odm_mode == dm_odm_combine_mode_2to1)
num_splits = 1;
else if (odm_mode == dm_odm_combine_mode_4to1)
num_splits = 3;
else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
num_splits = 1;
return num_splits;
}
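/* Returns the smallest of the ideal post-urgent memory, fabric and SDP
 * bandwidths for the entry. Illustrative only, using the dcn3_2_soc
 * defaults above:
 *   memory: 18000 MT/s * 24 ch * 2 B * 20% = 172800
 *   fabric:  2500 MHz * 64 B * 67%         = 107200
 *   sdp:     1564 MHz * 64 B * 90%         = 90086.4  (limiting value)
 */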
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
{
float memory_bw_kbytes_sec;
float fabric_bw_kbytes_sec;
float sdp_bw_kbytes_sec;
float limiting_bw_kbytes_sec;
memory_bw_kbytes_sec = entry->dram_speed_mts *
dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes *
((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
fabric_bw_kbytes_sec = entry->fabricclk_mhz *
dcn3_2_soc.return_bus_width_bytes *
((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
sdp_bw_kbytes_sec = entry->dcfclk_mhz *
dcn3_2_soc.return_bus_width_bytes *
((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
limiting_bw_kbytes_sec = memory_bw_kbytes_sec;
if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec)
limiting_bw_kbytes_sec = fabric_bw_kbytes_sec;
if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec)
limiting_bw_kbytes_sec = sdp_bw_kbytes_sec;
return limiting_bw_kbytes_sec;
}
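/* Given whichever of dcfclk / fabricclk / dram_speed is already populated in
 * the entry, derive the other two so that the ideal post-urgent bandwidth is
 * matched across SDP, fabric and DRAM.
 */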
static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
{
if (entry->dcfclk_mhz > 0) {
float bw_on_sdp = entry->dcfclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
entry->fabricclk_mhz = bw_on_sdp / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
entry->dram_speed_mts = bw_on_sdp / (dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
} else if (entry->fabricclk_mhz > 0) {
float bw_on_fabric = entry->fabricclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
entry->dcfclk_mhz = bw_on_fabric / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
entry->dram_speed_mts = bw_on_fabric / (dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
} else if (entry->dram_speed_mts > 0) {
float bw_on_dram = entry->dram_speed_mts * dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
entry->fabricclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
entry->dcfclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
}
}
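/* Insert the entry keeping the table sorted in ascending order of
 * net_bw_in_kbytes_sec; later entries are shifted up by one slot.
 */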
static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
dc_assert_fp_enabled();
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
}
for (i = *num_entries; i > index; i--)
table[i] = table[i - 1];
table[index] = *entry;
(*num_entries)++;
}
}
/**
* dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
* @dc: current dc state
* @context: new dc state
* @ref_pipe: Main pipe for the phantom stream
* @phantom_stream: target phantom stream state
* @pipes: DML pipe params
* @pipe_cnt: number of DML pipes
* @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
* calls into DML (get_subviewport_lines_needed_in_mall) to get the number of
* lines required for SubVP MCLK switching and assigns to the phantom stream
* accordingly.
*
* - The number of SubVP lines calculated in DML does not take into account
* FW processing delays and required pstate allow width, so we must include
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *ref_pipe,
struct dc_stream_state *phantom_stream,
display_e2e_pipe_params_st *pipes,
unsigned int pipe_cnt,
unsigned int dc_pipe_idx)
{
unsigned int i, pipe_idx;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
unsigned int num_dpp;
unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
struct dc_stream_state *main_stream = ref_pipe->stream;
dc_assert_fp_enabled();
// Find DML pipe index (pipe_idx) using dc_pipe_idx
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (i == dc_pipe_idx)
break;
pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
pstate_width_fw_delay_lines = ((double)(dc->caps.subvp_fw_processing_delay_us +
dc->caps.subvp_pstate_allow_width_us) / 1000000) *
(ref_pipe->stream->timing.pix_clk_100hz * 100) /
(double)ref_pipe->stream->timing.h_total;
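// Purely illustrative, with hypothetical numbers: a combined 100 us of FW
// processing delay + allow width on a 1188 MHz pixel clock with h_total 4400
// adds 100e-6 * 1188e6 / 4400 = 27 lines.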
// Update clks_cfg for calling into recalculate
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = socclk;
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
/* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
* to 2 swaths (i.e. 16 lines)
*/
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// W/A for DCC corruption with certain high resolution timings.
// Determine if pipe split is used. If so, add meta_row_height to the phantom vactive.
num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
/* dc->debug.subvp_extra_lines is 0 by default */
phantom_vactive += dc->debug.subvp_extra_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
phantom_stream->dst.y = 0;
phantom_stream->dst.height = phantom_vactive;
/* When scaling, DML provides the end to end required number of lines for MALL.
* dst.height is always correct for this case, but src.height is not which causes a
* delta between main and phantom pipe scaling outputs. Need to adjust src.height on
* phantom for this case.
*/
phantom_stream->src.y = 0;
phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;
phantom_stream->timing.v_addressable = phantom_vactive;
phantom_stream->timing.v_front_porch = 1;
phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
/**
* dcn32_get_num_free_pipes - Calculate number of free pipes
* @dc: current dc state
* @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
* Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
unsigned int i;
unsigned int free_pipes = 0;
unsigned int num_pipes = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && !pipe->top_pipe) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
}
}
}
free_pipes = dc->res_pool->pipe_count - num_pipes;
return free_pipes;
}
/**
* dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
* @dc: current dc state
* @context: new dc state
* @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
* we are forcing SubVP P-State switching on the current config.
*
* The number of pipes used for the chosen surface must be less than or equal to the
* number of free pipes available.
*
* In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK).
* For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
* Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
unsigned int *index)
{
unsigned int i, pipe_idx;
unsigned int max_frame_time = 0;
bool valid_assignment_found = false;
unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
unsigned int num_pipes = 0;
unsigned int refresh_rate = 0;
if (!pipe->stream)
continue;
// Round up
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
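// i.e. the integer ceiling of pix_clk_hz / (v_total * h_total), truncated
// back to an unsigned int after the double division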
/* SubVP pipe candidate requirements:
* - Refresh rate < 120hz
* - Not able to switch in vactive naturally (switching in active means the
* DET provides enough buffer to hide the P-State switch latency -- trying
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
!pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
dcn32_allow_subvp_with_active_margin(pipe)))) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
}
pipe = &context->res_ctx.pipe_ctx[i];
if (num_pipes <= free_pipes) {
struct dc_stream_state *stream = pipe->stream;
unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
(double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
if (frame_us > max_frame_time) {
*index = i;
max_frame_time = frame_us;
valid_assignment_found = true;
}
}
}
pipe_idx++;
}
return valid_assignment_found;
}
/**
* dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
* @dc: current dc state
* @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
* (that does not already have phantom pipe assigned).
*
* e.g. For a 2 stream config where the first stream uses one
* pipe and the second stream uses 2 pipes (i.e. pipe split),
* this function will return true because there is 1 remaining
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
*/
static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context)
{
unsigned int i, split_cnt, free_pipes;
unsigned int min_pipe_split = dc->res_pool->pipe_count + 1; // init as max number of pipes + 1
bool subvp_possible = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Find the minimum pipe split count for non SubVP pipes
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
pipe = pipe->bottom_pipe;
}
if (split_cnt < min_pipe_split)
min_pipe_split = split_cnt;
}
}
free_pipes = dcn32_get_num_free_pipes(dc, context);
// SubVP is only possible if at least one pipe is being used (i.e. free_pipes
// should not be equal to pipe_count)
if (free_pipes >= min_pipe_split && free_pipes < dc->res_pool->pipe_count)
subvp_possible = true;
return subvp_possible;
}
/**
* subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
* @dc: current dc state
* @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
* 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
* Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
struct pipe_ctx *subvp_pipes[2];
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
uint32_t i;
uint32_t max_microschedule_us = 0;
int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint32_t time_us = 0;
/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
phantom = pipe->stream->mall_stream_config.paired_stream;
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
// Round up when calculating microschedule time (+ 1 at the end)
time_us = (microschedule_lines * phantom->timing.h_total) /
(double)(phantom->timing.pix_clk_100hz * 100) * 1000000 +
dc->caps.subvp_prefetch_end_to_mall_start_us +
dc->caps.subvp_fw_processing_delay_us + 1;
if (time_us > max_microschedule_us)
max_microschedule_us = time_us;
subvp_pipes[index] = pipe;
index++;
// Maximum 2 SubVP pipes
if (index == 2)
break;
}
}
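/* Worst-case overlap (per the description above): the other display's VBLANK
 * falls in the middle of this display's VACTIVE, so the check requires
 * (VACTIVE - other VBLANK) / 2 to still fit the longest microschedule, for
 * both pipes.
 */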
vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) *
subvp_pipes[0]->stream->timing.h_total) /
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) *
subvp_pipes[1]->stream->timing.h_total) /
(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
(vactive2_us - vblank1_us) / 2 > max_microschedule_us)
return true;
return false;
}
/**
* subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable
* @dc: current dc state
* @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
* 2. Determine the frame time for the DRR display when adding required margin for MCLK switching
* (the margin is equal to the MALL region + DRR margin (500us))
* 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, stretched DRR VBLANK))
* then report the configuration as supported
*
* Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
{
bool schedulable = false;
uint32_t i;
struct pipe_ctx *pipe = NULL;
struct pipe_ctx *drr_pipe = NULL;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *drr_timing = NULL;
int16_t prefetch_us = 0;
int16_t mall_region_us = 0;
int16_t drr_frame_us = 0; // nominal frame time
int16_t subvp_active_us = 0;
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!resource_is_pipe_type(pipe, DPP_PIPE))
continue;
// Find the SubVP pipe
if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
break;
}
// Find the DRR pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
drr_pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe only
if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
!resource_is_pipe_type(drr_pipe, DPP_PIPE))
continue;
if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
(drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable))
break;
}
main_timing = &pipe->stream->timing;
phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
drr_timing = &drr_pipe->stream->timing;
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
dc->caps.subvp_prefetch_end_to_mall_start_us;
subvp_active_us = main_timing->v_addressable * main_timing->h_total /
(double)(main_timing->pix_clk_100hz * 100) * 1000000;
drr_frame_us = drr_timing->v_total * drr_timing->h_total /
(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
// P-State allow width and FW delays are already included in phantom_timing->v_addressable
mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
* highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
* for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
* and the max of (VBLANK blanking time, MALL region)).
*/
if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
schedulable = true;
return schedulable;
}
/**
* subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
* @dc: current dc state
* @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
* 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time))
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
* Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
struct pipe_ctx *pipe = NULL;
struct pipe_ctx *subvp_pipe = NULL;
bool found = false;
bool schedulable = false;
uint32_t i = 0;
uint8_t vblank_index = 0;
uint16_t prefetch_us = 0;
uint16_t mall_region_us = 0;
uint16_t vblank_frame_us = 0;
uint16_t subvp_active_us = 0;
uint16_t vblank_blank_us = 0;
uint16_t max_vblank_mallregion = 0;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML reports that SubVP + VBLANK
* is supported, it is either a single VBLANK case or two VBLANK
* displays which are synchronized (in which case they have identical
* timings).
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!resource_is_pipe_type(pipe, DPP_PIPE))
continue;
if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipe = pipe;
}
if (found) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
dc->caps.subvp_prefetch_end_to_mall_start_us;
// P-State allow width and FW delays are already included in phantom_timing->v_addressable
mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total /
(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
vblank_blank_us = (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total /
(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
subvp_active_us = main_timing->v_addressable * main_timing->h_total /
(double)(main_timing->pix_clk_100hz * 100) * 1000000;
max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
// Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
// and the max of (VBLANK blanking time, MALL region)
// TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
schedulable = true;
}
return schedulable;
}
/**
* subvp_subvp_admissable() - Determine if subvp + subvp config is admissible
*
* @dc: Current DC state
* @context: New DC state to be programmed
*
* SubVP + SubVP is admissible under the following conditions:
* - All SubVP pipes are < 120Hz, OR
* - All SubVP pipes fall within the supported high refresh range (120-175Hz)
*
* Return: True if admissible, false otherwise
*/
static bool subvp_subvp_admissable(struct dc *dc,
struct dc_state *context)
{
bool result = false;
uint32_t i;
uint8_t subvp_count = 0;
uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
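// Round-up integer refresh rate computed in 64-bit:
// (pix_clk_hz + v_total * h_total - 1) / (v_total * h_total),
// with div_u64 used because the numerator is a u64.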
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
if ((uint32_t)refresh_rate < min_refresh)
min_refresh = (uint32_t)refresh_rate;
if ((uint32_t)refresh_rate > max_refresh)
max_refresh = (uint32_t)refresh_rate;
subvp_count++;
}
}
if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
(min_refresh >= subvp_high_refresh_list.min_refresh &&
max_refresh <= subvp_high_refresh_list.max_refresh)))
result = true;
return result;
}
/**
* subvp_validate_static_schedulability - Check which SubVP case is calculated
* and handle static analysis based on the case.
* @dc: current dc state
* @context: new dc state
* @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
* Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
int vlevel)
{
bool schedulable = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
uint32_t i, pipe_idx;
uint8_t subvp_count = 0;
uint8_t vactive_count = 0;
uint8_t non_subvp_pipes = 0;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe) {
if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_count++;
if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
}
}
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
}
if (subvp_count == 2) {
// Static schedulability check for SubVP + SubVP case
schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context);
} else if (subvp_count == 1 && non_subvp_pipes == 0) {
// Single SubVP configs will be supported by default as long as they are supported by DML
schedulable = true;
} else if (subvp_count == 1 && non_subvp_pipes == 1) {
if (dcn32_subvp_drr_admissable(dc, context))
schedulable = subvp_drr_schedulable(dc, context);
else if (dcn32_subvp_vblank_admissable(dc, context, vlevel))
schedulable = subvp_vblank_schedulable(dc, context);
} else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp &&
vactive_count > 0) {
// For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default.
// We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count.
// SubVP + VACTIVE currently unsupported
schedulable = false;
}
return schedulable;
}
static void dcn32_full_validate_bw_helper(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *vlevel,
int *split,
bool *merge,
int *pipe_cnt)
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
int i = 0;
bool found_supported_config = false;
dc_assert_fp_enabled();
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
* Override present for testing.
*/
if (dc->debug.dml_disallow_alternate_prefetch_modes)
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
else
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states) {
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
}
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
* 2. Full update (i.e. !fast_validate)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
(*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
memset(merge, 0, MAX_PIPES * sizeof(bool));
/* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->plane_state || !pipe_ctx->stream)
continue;
resource_build_scaling_params(pipe_ctx);
}
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
/* For the case where *vlevel = num_states, bandwidth validation has failed for this config.
* Adding phantom pipes won't change the validation result, so change the DML input param
* for P-State support before adding phantom pipes and recalculating the DML result.
* However, this case is only applicable for SubVP + DRR cases because the prefetch mode
* will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
if (*vlevel == context->bw_ctx.dml.soc.num_states &&
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_fclk_and_stutter;
/* There are params (such as FabricClock) that need to be recalculated
* after validation fails (otherwise it will be 0). Calculation for
* phantom vactive requires call into DML, so we must ensure all the
* vba params are valid otherwise we'll get incorrect phantom vactive.
*/
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
}
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* Check whether the requested vlevel supports p-state;
 * if not, select the lowest vlevel that does
 */
for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
*vlevel = i;
break;
}
}
if (*vlevel < context->bw_ctx.dml.soc.num_states
&& subvp_validate_static_schedulability(dc, context, *vlevel))
found_supported_config = true;
if (found_supported_config) {
// For SubVP + DRR cases, we can force the lowest vlevel that supports the mode
if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) {
/* find lowest vlevel that supports the config */
for (i = *vlevel; i >= 0; i--) {
if (vba->ModeSupport[i][vba->maxMpcComb]) {
*vlevel = i;
} else {
break;
}
}
}
}
}
// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
// remove phantom pipes and repopulate dml pipes
if (!found_supported_config) {
dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states) {
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
}
} else {
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
/* Call validate_apply_pipe_split flags after calling DML getters for
* phantom dlg params, or some of the VBA params indicating pipe split
* can be overwritten by the getters.
*
* When setting up SubVP config, all pipes are merged before attempting to
* add phantom pipes. If pipe split (ODM / MPC) is required, both the main
* and phantom pipes will be split in the regular pipe splitting sequence.
*/
memset(split, 0, MAX_PIPES * sizeof(int));
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until driver has acquired the DMCUB lock to do it safely.
}
}
}
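/* DTBCLK is required whenever at least one active stream is driven with
 * 128b/132b (DP2.x) signaling; otherwise it can remain disabled.
 */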
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
}
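/* Clamp VSTARTUP so it is not smaller than the value implied by the timing:
 * blank_start = v_total - v_front_porch, blank_end = blank_start - borders -
 * v_addressable, and the lower bound is blank_end + (v_total - blank_start).
 *
 * Illustrative numbers (not tied to any particular mode): v_total = 1125,
 * v_front_porch = 4, v_addressable = 1080 and no borders give blank_start =
 * 1121, blank_end = 41, and a lower bound of 41 + 4 = 45 for vstartup_start.
 */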
static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end = 0;
uint32_t asic_blank_start = 0;
uint32_t newVstartup = 0;
patched_crtc_timing = *dc_crtc_timing;
if (patched_crtc_timing.flags.INTERLACE == 1) {
if (patched_crtc_timing.v_front_porch < 2)
patched_crtc_timing.v_front_porch = 2;
} else {
if (patched_crtc_timing.v_front_porch < 1)
patched_crtc_timing.v_front_porch = 1;
}
/* blank_start = frame end - front porch */
asic_blank_start = patched_crtc_timing.v_total -
patched_crtc_timing.v_front_porch;
/* blank_end = blank_start - active */
asic_blank_end = asic_blank_start -
patched_crtc_timing.v_border_bottom -
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}
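/* Copy the DML/VBA outputs for the validated vlevel into the clock state and
 * per-pipe DLG params. DML reports clocks in MHz while the dc clock state is
 * in kHz, hence the "* 1000" conversions; DRAMSpeed is in MT/s, so
 * dramclk_khz = DRAMSpeed * 1000 / 16 (an illustrative 16000 MT/s maps to a
 * 1000 MHz, i.e. 1000000 kHz, memory clock).
 */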
static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
{
int i, pipe_idx, active_hubp_count = 0;
bool usr_retraining_support = false;
bool unbounded_req_enabled = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
/* Pstate change might not be supported by hardware, but it might be
* possible with firmware driven vertical blank stretching.
*/
context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000;
if (context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_fclock_change_unsupported)
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
else
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
usr_retraining_support = context->bw_ctx.dml.vba.USRRetrainingSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
ASSERT(usr_retraining_support);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt);
if (unbounded_req_enabled && pipe_cnt > 1) {
// Unbounded requesting should not ever be used when more than 1 pipe is enabled.
ASSERT(false);
unbounded_req_enabled = false;
}
context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
} else {
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled;
}
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
if (context->res_ctx.pipe_ctx[i].plane_state)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
else
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0)
context->res_ctx.pipe_ctx[i].has_vactive_margin = true;
else
context->res_ctx.pipe_ctx[i].has_vactive_margin = false;
/* MALL Allocation Sizes */
/* count from active, top pipes per plane only */
if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
(context->res_ctx.pipe_ctx[i].top_pipe == NULL ||
context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
/* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
}
} else {
/* SUBVP: phantom surfaces only stored in MALL */
context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
}
}
if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
}
/* save an original dppclk copy */
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz
* 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
* 1000;
context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream)
context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
}
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes,
pipe_cnt, pipe_idx);
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg_v2(&context->res_ctx.pipe_ctx[i].rq_regs,
&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipe_idx++;
}
}
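/* Find a free pipe to use as the secondary pipe of a split. Preference order,
 * as coded below: reuse old_index if that pipe is free in the new context,
 * then any free pipe that was a head pipe (no top pipe and no previous ODM
 * pipe) in the current state, and finally any free pipe at all.
 */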
static struct pipe_ctx *dcn32_find_split_pipe(
struct dc *dc,
struct dc_state *context,
int old_index)
{
struct pipe_ctx *pipe = NULL;
int i;
if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[old_index];
pipe->pipe_idx = old_index;
}
if (!pipe)
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
&& dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
if (context->res_ctx.pipe_ctx[i].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
pipe->pipe_idx = i;
break;
}
}
}
/*
* May need to fix pipes getting tossed from 1 opp to another on flip
* Add for debugging transient underflow during topology updates:
* ASSERT(pipe);
*/
if (!pipe)
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (context->res_ctx.pipe_ctx[i].stream == NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
pipe->pipe_idx = i;
break;
}
}
return pipe;
}
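/* Populate sec_pipe as an ODM or MPC split of pri_pipe and link it into the
 * pipe topology. For windowed MPO under ODM where the MPO window fits entirely
 * in one ODM half, no split is performed and the function returns early.
 * Returns false only when a required DSC instance cannot be acquired.
 */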
static bool dcn32_split_stream_for_mpc_or_odm(
const struct dc *dc,
struct resource_context *res_ctx,
struct pipe_ctx *pri_pipe,
struct pipe_ctx *sec_pipe,
bool odm)
{
int pipe_idx = sec_pipe->pipe_idx;
const struct resource_pool *pool = dc->res_pool;
DC_LOGGER_INIT(dc->ctx->logger);
if (odm && pri_pipe->plane_state) {
/* ODM + window MPO, where MPO window is on left half only */
if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <=
pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) {
DC_LOG_SCALER("%s - ODM + window MPO(left). pri_pipe:%d\n",
__func__,
pri_pipe->pipe_idx);
return true;
}
/* ODM + window MPO, where MPO window is on right half only */
if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) {
DC_LOG_SCALER("%s - ODM + window MPO(right). pri_pipe:%d\n",
__func__,
pri_pipe->pipe_idx);
return true;
}
}
*sec_pipe = *pri_pipe;
sec_pipe->pipe_idx = pipe_idx;
sec_pipe->plane_res.mi = pool->mis[pipe_idx];
sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
sec_pipe->stream_res.dsc = NULL;
if (odm) {
if (pri_pipe->next_odm_pipe) {
ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
}
if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
}
if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
}
pri_pipe->next_odm_pipe = sec_pipe;
sec_pipe->prev_odm_pipe = pri_pipe;
ASSERT(sec_pipe->top_pipe == NULL);
if (!sec_pipe->top_pipe)
sec_pipe->stream_res.opp = pool->opps[pipe_idx];
else
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
return false;
}
} else {
if (pri_pipe->bottom_pipe) {
ASSERT(pri_pipe->bottom_pipe != sec_pipe);
sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
sec_pipe->bottom_pipe->top_pipe = sec_pipe;
}
pri_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = pri_pipe;
ASSERT(pri_pipe->plane_state);
}
return true;
}
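/* Core DML validation for DCN32. High-level flow as implemented below: remove
 * any existing phantom pipes, populate the DML pipe array, run the full
 * SubVP-aware validation helper (unless fast_validate), fall back to the
 * voltage-favouring path if needed, apply pipe split/merge flags, rebuild the
 * pipe topology when splits or merges happened, and re-run DML so the DET
 * allocation and the final voltage level match the new topology.
 */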
bool dcn32_internal_validate_bw(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
bool fast_validate)
{
bool out = false;
bool repopulate_pipes = false;
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx;
int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
ASSERT(pipes);
if (!pipes)
return false;
// For each full update, remove all existing phantom pipes first
dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
if (!pipe_cnt) {
out = true;
goto validate_out;
}
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
if (!fast_validate)
dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
if (fast_validate ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
* If dml_disallow_alternate_prefetch_modes is false, then we have already
* tried alternate prefetch modes during full validation.
*
* If mode is unsupported or there is no p-state support, then
* fall back to favouring voltage.
*
* If Prefetch mode 0 failed for this config, or passed with Max UCLK, then fall
* back to the most conservative prefetch setting (dm_prefetch_support_none, set
* just below).
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
context->bw_ctx.dml.validate_max_state = fast_validate;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
// dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
vba->VoltageLevel = vlevel;
}
}
dml_log_mode_support_params(&context->bw_ctx.dml);
if (vlevel == context->bw_ctx.dml.soc.num_states)
goto validate_fail;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
if (!pipe->stream)
continue;
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& !dc->config.enable_windowed_mpo_odm
&& pipe->plane_state && mpo_pipe
&& memcmp(&mpo_pipe->plane_state->clip_rect,
&pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
}
pipe_idx++;
}
/* merge pipes if necessary */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/*skip pipes that don't need merging*/
if (!merge[i])
continue;
/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
if (pipe->prev_odm_pipe) {
/*split off odm pipe*/
pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
/* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
if (pipe->bottom_pipe) {
if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
/*MPC split rules will handle this case*/
pipe->bottom_pipe->top_pipe = NULL;
} else {
/* when merging an ODM pipes, the bottom MPC pipe must now point to
* the previous ODM pipe and its associated stream assets
*/
if (pipe->prev_odm_pipe->bottom_pipe) {
/* 3 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
} else {
/* 2 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
}
memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
}
}
if (pipe->top_pipe) {
pipe->top_pipe->bottom_pipe = NULL;
}
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
pipe->stream = NULL;
pipe->top_pipe = NULL;
pipe->prev_odm_pipe = NULL;
if (pipe->stream_res.dsc)
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
top_pipe->bottom_pipe = bottom_pipe;
if (bottom_pipe)
bottom_pipe->top_pipe = top_pipe;
pipe->top_pipe = NULL;
pipe->bottom_pipe = NULL;
pipe->plane_state = NULL;
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
}
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = NULL;
bool odm;
int old_index = -1;
if (!pipe->stream || newly_split[i])
continue;
pipe_idx++;
odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
if (!pipe->plane_state && !odm)
continue;
if (split[i]) {
if (odm) {
if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
else if (old_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->pipe_idx;
} else {
if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else if (old_pipe->bottom_pipe &&
old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->pipe_idx;
}
hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
goto validate_fail;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe, odm))
goto validate_fail;
newly_split[hsplit_pipe->pipe_idx] = true;
repopulate_pipes = true;
}
if (split[i] == 4) {
struct pipe_ctx *pipe_4to1;
if (odm && old_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->pipe_idx;
else if (!odm && old_pipe->bottom_pipe &&
old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->pipe_idx;
else
old_index = -1;
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, pipe_4to1, odm))
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
&& old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else
old_index = -1;
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
hsplit_pipe, pipe_4to1, odm))
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
}
if (odm)
dcn20_build_mapped_resource(dc, context, pipe->stream);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state) {
if (!resource_build_scaling_params(pipe))
goto validate_fail;
}
}
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
if (repopulate_pipes) {
int flag_max_mpc_comb = vba->maxMpcComb;
int flag_vlevel = vlevel;
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
* ensure all the params are calculated correctly. We do not need to run the
* pipe split check again after this call (pipes are already split / merged).
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel == context->bw_ctx.dml.soc.num_states) {
/* failed after DET size changes */
goto validate_fail;
} else if (flag_max_mpc_comb == 0 &&
flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) {
/* check the context constructed with pipe split flags is still valid*/
bool flags_valid = false;
for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
if (vba->ModeSupport[i][flag_max_mpc_comb]) {
vba->maxMpcComb = flag_max_mpc_comb;
vba->VoltageLevel = i;
vlevel = i;
flags_valid = true;
}
}
/* this should never happen */
if (!flags_valid)
goto validate_fail;
}
}
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
out = true;
goto validate_out;
validate_fail:
out = false;
validate_out:
return out;
}
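/* Compute the watermark sets and DLG params for the validated context. As
 * coded below: SubVP and FPO (fw based vblank stretch) cases re-run validation
 * with dummy p-state/fclk latencies first; Set B then uses clock_limits[2] (or
 * a conservative fallback DCFCLK) with WM_B latencies, Set C uses the dummy
 * p-state latency, Set A uses the real latencies from validation (or is copied
 * from Set C with pstate_change_ns = 0 when p-state is unsupported), and Set D
 * is copied from Set A since MALL watermarks are not optimized separately.
 */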
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
bool subvp_in_use = dcn32_subvp_in_use(dc, context);
unsigned int min_dram_speed_mts_margin;
bool need_fclk_lat_as_dummy = false;
bool is_subvp_p_drr = false;
struct dc_stream_state *fpo_candidate_stream = NULL;
dc_assert_fp_enabled();
/* need to find dummy latency index for subvp */
if (subvp_in_use) {
/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching its VBLANK */
if (!pstate_en) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
pstate_en = true;
is_subvp_p_drr = true;
}
dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is
* scheduled correctly to account for dummy pstate.
*/
if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
need_fclk_lat_as_dummy = true;
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
}
}
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i])
context->streams[i]->fpo_in_use = false;
}
if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
pstate_en && vlevel != 0)) {
/* the fw-based vblank stretch is only attempted when the mclk switch cannot happen naturally */
fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (fpo_candidate_stream) {
fpo_candidate_stream->fpo_in_use = true;
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
}
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
* we reinstate the original dram_clock_change_latency_us on the context
* and all variables that may have changed up to this point, except the
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
* prefetch is scheduled correctly to account for dummy pstate.
*/
if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
need_fclk_lat_as_dummy = true;
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = true;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank;
} else {
/* Restore FCLK latency and re-run validation to go back to original validation
* output if we find that enabling FPO does not give us any benefit (i.e. lower
* voltage level)
*/
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i])
context->streams[i]->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
}
}
}
/* Set B:
* For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
* otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark
* calculations to cover bootup clocks.
* DCFCLK: soc.clock_limits[2] when available
* UCLK: soc.clock_limits[2] when available
*/
if (dcn3_2_soc.num_states > 2) {
vlevel_temp = 2;
dcfclk = dcn3_2_soc.clock_limits[2].dcfclk_mhz;
} else
dcfclk = 615; //DCFCLK Vmin_lv
pipes[0].clks_cfg.voltage = vlevel_temp;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
/* Set D:
* All clocks min.
* DCFCLK: Min, as reported by PM FW when available
* UCLK : Min, as reported by PM FW when available
* sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
*/
/*
if (dcn3_2_soc.num_states > 2) {
vlevel_temp = 0;
dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
} else
dcfclk = 615; //DCFCLK Vmin_lv
pipes[0].clks_cfg.voltage = vlevel_temp;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
*/
/* Set C, for Dummy P-State:
* All clocks min.
* DCFCLK: Min, as reported by PM FW, when available
* UCLK : Min, as reported by PM FW, when available
* pstate latency as per UCLK state dummy pstate latency
*/
// For Set A and Set C use values from validation
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
}
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
dm_dram_clock_change_unsupported) {
int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;
min_dram_speed_mts =
dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
}
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
/* find the largest dummy p-state table entry that is lower than the dram speed;
 * anything lower than DPM0 still uses DPM0
 */
for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
if (min_dram_speed_mts + min_dram_speed_mts_margin >
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
break;
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
/* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
* In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
* value.
*/
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
/* The only difference between A and C is p-state latency, if p-state is not supported
* with full p-state latency we want to calculate DLG based on dummy p-state latency,
* Set A p-state watermark set to 0 on DCN30, when p-state unsupported, for now keep as DCN30.
*/
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
/* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
* UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
*/
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
} else {
/* Set A:
* All clocks min.
* DCFCLK: Min, as reported by PM FW, when available
* UCLK: Min, as reported by PM FW, when available
*/
/* For set A set the correct latency values (i.e. non-dummy values) unconditionally
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
/* Make set D = set A since we do not optimize watermarks for MALL */
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_idx++;
}
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
/* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */
if (need_fclk_lat_as_dummy)
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
/* Restore full p-state latency */
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
/* revert fclk lat changes if required */
if (need_fclk_lat_as_dummy)
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
}
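/* Derive the DCFCLK/FCLK that can just sustain the DRAM bandwidth available at
 * a given UCLK: DRAM bandwidth is uclk_mts * num_chans * channel_width derated
 * by the DRAM or SDP usage percentage (whichever gives less), and the optimal
 * clock is that bandwidth divided by the derated bytes per clock of the return
 * bus (for DCFCLK) or of the fabric datapath (for FCLK), so the result scales
 * linearly with uclk_mts.
 */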
static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_2_soc.num_chans *
dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_2_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_2_soc.return_bus_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
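/* Remove table[index] by shifting the tail of the table down one slot, zeroing
 * the vacated last entry and decrementing *num_entries.
 */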
static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
unsigned int index)
{
int i;
if (*num_entries == 0)
return;
for (i = index; i < *num_entries - 1; i++) {
table[i] = table[i + 1];
}
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
{
int i;
unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
/* Scan through the clock values we currently have and, if any are 0,
 * populate them with the dcn3_2_soc.clock_limits[] value.
 *
 * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
 * 0 will cause the clock table build to be skipped.
*/
if (max_dcfclk_mhz == 0)
bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
if (max_dispclk_mhz == 0)
bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
if (max_dtbclk_mhz == 0)
bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
if (max_uclk_mhz == 0)
bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
}
static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
struct _vcs_dpi_voltage_scaling_st *second_entry)
{
struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
*first_entry = *second_entry;
*second_entry = temp_entry;
}
/*
* sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
*/
static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
unsigned int start_index = 0;
unsigned int end_index = 0;
unsigned int current_bw = 0;
for (int i = 0; i < (*num_entries - 1); i++) {
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
current_bw = table[i].net_bw_in_kbytes_sec;
start_index = i;
end_index = ++i;
while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
end_index = ++i;
}
if (start_index != end_index) {
for (int j = start_index; j < end_index; j++) {
for (int k = start_index; k < end_index; k++) {
if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
swap_table_entries(&table[k], &table[k+1]);
}
}
}
start_index = 0;
end_index = 0;
}
}
/*
* remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
* and remove entries that do not
*/
static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
for (int i = 0; i < (*num_entries - 1); i++) {
if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
(table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
remove_entry_from_table_at_index(table, num_entries, i);
}
}
}
/*
* override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
* Input:
* max_clk_limit - struct containing the desired clock timings
* Output:
* curr_clk_limit - struct containing the timings that need to be overwritten
* Return: 0 upon success, non-zero for failure
*/
static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
struct clk_limit_table_entry *curr_clk_limit)
{
if (NULL == max_clk_limit || NULL == curr_clk_limit)
return -1; //invalid parameters
//only overwrite if desired max clock frequency is initialized
if (max_clk_limit->dcfclk_mhz != 0)
curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
if (max_clk_limit->fclk_mhz != 0)
curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
if (max_clk_limit->memclk_mhz != 0)
curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
if (max_clk_limit->socclk_mhz != 0)
curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
if (max_clk_limit->dtbclk_mhz != 0)
curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
if (max_clk_limit->dispclk_mhz != 0)
curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
return 0;
}
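/* Build a synthetic DML state table from the PMFW clock table. As implemented
 * below: find the max of each clock and count the UCLK/FCLK/DCFCLK DPM levels
 * (optionally clamped to the DC mode limits), seed the table with the DCFCLK
 * STA points, the max DCFCLK, every UCLK DPM and either every FCLK DPM (coarse
 * grained FCLK) or just the max FCLK (fine grained FCLK), then prune states
 * above the supported maxima, round clocks up to real DPM levels, drop
 * duplicates and renumber the states.
 */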
static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
int i, j;
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
unsigned int num_uclk_dpms = 0;
unsigned int num_fclk_dpms = 0;
unsigned int num_dcfclk_dpms = 0;
unsigned int num_dc_uclk_dpms = 0;
unsigned int num_dc_fclk_dpms = 0;
unsigned int num_dc_dcfclk_dpms = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
num_uclk_dpms++;
if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
num_dc_uclk_dpms++;
}
if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
num_fclk_dpms++;
if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
num_dc_fclk_dpms++;
}
if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
num_dcfclk_dpms++;
if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
num_dc_dcfclk_dpms++;
}
}
if (!disable_dc_mode_overwrite) {
//Overwrite max frequencies with max DC mode frequencies for DC mode systems
override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
num_uclk_dpms = num_dc_uclk_dpms;
num_fclk_dpms = num_dc_fclk_dpms;
num_dcfclk_dpms = num_dc_dcfclk_dpms;
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
}
if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
return -1;
if (max_clk_data.dppclk_mhz == 0)
max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
if (max_clk_data.fclk_mhz == 0)
max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
dcn3_2_soc.pct_ideal_sdp_bw_after_urgent /
dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
if (max_clk_data.phyclk_mhz == 0)
max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
*num_entries = 0;
entry.dispclk_mhz = max_clk_data.dispclk_mhz;
entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
entry.dppclk_mhz = max_clk_data.dppclk_mhz;
entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
entry.phyclk_mhz = max_clk_data.phyclk_mhz;
entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
// Insert all the DCFCLK STAs
for (i = 0; i < num_dcfclk_stas; i++) {
entry.dcfclk_mhz = dcfclk_sta_targets[i];
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
// Insert the max DCFCLK
entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
for (i = 0; i < num_uclk_dpms; i++) {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
// If FCLK is coarse grained, insert individual DPMs.
if (num_fclk_dpms > 2) {
for (i = 0; i < num_fclk_dpms; i++) {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
// If FCLK fine grained, only insert max
else {
entry.dcfclk_mhz = 0;
entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
get_optimal_ntuple(&entry);
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
// At this point, the table contains all "points of interest" based on
// DPMs from PMFW, and STAs. Table is sorted by BW, and all clock
// ratios (by derate) are exact.
// Remove states that require higher clocks than are supported
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
remove_entry_from_table_at_index(table, num_entries, i);
}
// Insert entry with all max dc limits without bandwidth matching
if (!disable_dc_mode_overwrite) {
struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
sort_entries_with_same_bw(table, num_entries);
remove_inconsistent_entries(table, num_entries);
}
// At this point, the table only contains supported points of interest.
// It could be used as is, but some states may be redundant due to the
// coarse-grained nature of some clocks, so we want to round up to
// coarse-grained DPMs and remove duplicates.
// Round up UCLKs
for (i = *num_entries - 1; i >= 0 ; i--) {
for (j = 0; j < num_uclk_dpms; j++) {
if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
// If FCLK is coarse grained, round up to next DPMs
if (num_fclk_dpms > 2) {
for (i = *num_entries - 1; i >= 0 ; i--) {
for (j = 0; j < num_fclk_dpms; j++) {
if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
break;
}
}
}
}
// Otherwise, round up to minimum.
else {
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
}
}
}
// Round DCFCLKs up to minimum
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
}
}
// Remove duplicate states, note duplicate states are always neighbouring since table is sorted.
i = 0;
while (i < *num_entries - 1) {
if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
remove_entry_from_table_at_index(table, num_entries, i + 1);
else
i++;
}
// Fix up the state indices
for (i = *num_entries - 1; i >= 0 ; i--) {
table[i].state = i;
}
return 0;
}
/*
* dcn32_update_bw_bounding_box
*
* This overrides some dcn3_2 ip_or_soc initial parameters, hardcoded from the
* spreadsheet, with actual values as per the dGPU SKU:
* - with a few options passed in from dc->config
* - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
* need to get it from PM FW)
* - with latency values (passed in ns units) from dc->bb_overrides for
* debugging purposes
* - with latencies from VBIOS (in 100_ns units) if available for a
* certain dGPU SKU
* - with the number of DRAM channels from VBIOS (which differs for certain
* dGPU SKUs of the same ASIC)
* - with clock levels from the clk_table entries passed from Clk Mgr as
* reported by PM FW for different clocks (which might differ for certain
* dGPU SKUs of the same ASIC)
*/
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
dc_assert_fp_enabled();
/* Overrides from dc->config options */
dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
/* Override from passed dc->bb_overrides if available */
if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}
if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
dcn3_2_soc.sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
!= dc->bb_overrides.dram_clock_change_latency_ns
&& dc->bb_overrides.dram_clock_change_latency_ns) {
dcn3_2_soc.dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
!= dc->bb_overrides.fclk_clock_change_latency_ns
&& dc->bb_overrides.fclk_clock_change_latency_ns) {
dcn3_2_soc.fclk_change_latency_us =
dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
}
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
dcn3_2_soc.dummy_pstate_latency_us =
dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
}
/* Override from VBIOS if VBIOS bb_info available */
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_2_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_2_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_2_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}
}
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
/* DML DSC delay factor workaround */
dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
/* Override clock levels from Clk Mgr table entries as reported by PM FW */
if (bw_params->clk_table.entries[0].memclk_mhz) {
if (dc->debug.use_legacy_soc_bb_mechanism) {
unsigned int i = 0, j = 0, num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int min_dcfclk = UINT_MAX;
/* Set 199 as first value in STA target array to have a minimum DCFCLK value.
* For DCN32 we set min to 199 so minimum FCLK DPM0 (300MHz) can be achieved */
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dcfclk_mhz != 0 &&
bw_params->clk_table.entries[i].dcfclk_mhz < min_dcfclk)
min_dcfclk = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (min_dcfclk > dcfclk_sta_targets[0])
dcfclk_sta_targets[0] = min_dcfclk;
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_2_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
// Update size of array since we "removed" duplicates
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
dcn32_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
}
// Calculate optimal uclk for each dcfclk sta target
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_2_soc.num_states = num_states;
for (i = 0; i < dcn3_2_soc.num_states; i++) {
dcn3_2_soc.clock_limits[i].state = i;
dcn3_2_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_2_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
/* Fill all states with max values of all these clocks */
dcn3_2_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_2_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_2_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_2_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (i > 0) {
if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
dcn3_2_soc.clock_limits[i].dtbclk_mhz = dcn3_2_soc.clock_limits[i-1].dtbclk_mhz;
} else {
dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
} else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_2_soc.clock_limits[i].socclk_mhz = dcn3_2_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_2_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
if (!dram_speed_mts[i] && i > 0)
dcn3_2_soc.clock_limits[i].dram_speed_mts = dcn3_2_soc.clock_limits[i-1].dram_speed_mts;
else
dcn3_2_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* These clocks cannot come from bw_params, always fill from dcn3_2_soc[0] */
/* PHYCLK_D18, PHYCLK_D32 */
dcn3_2_soc.clock_limits[i].phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
}
} else {
build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
}
/* Re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
}
}
void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
dc_assert_fp_enabled();
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
{
bool allow = false;
uint32_t refresh_rate = 0;
/* Allow SubVP on displays that have active margin; for now, only on
* 2560x1440@60hz displays. There must be no scaling as well.
*
* For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60
* configs for p-state switching.
*/
if (pipe->stream && pipe->plane_state) {
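/* Refresh rate in Hz, rounded up: pixel clock / (h_total * v_total) */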
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
if (pipe->stream->timing.v_addressable == 1440 &&
pipe->stream->timing.h_addressable == 2560 &&
refresh_rate >= 55 && refresh_rate <= 65 &&
pipe->plane_state->src_rect.height == 1440 &&
pipe->plane_state->src_rect.width == 2560 &&
pipe->plane_state->dst_rect.height == 1440 &&
pipe->plane_state->dst_rect.width == 2560)
allow = true;
}
return allow;
}
/**
* dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp
*
* @dc: Current DC state
* @context: New DC state to be programmed
* @pipe: Pipe to be considered for use in subvp
*
* On high refresh rate display configs, we will allow subvp under the following conditions:
* 1. Resolution is 3840x2160, 3440x1440, or 2560x1440
* 2. Refresh rate is between 120hz - 165hz
* 3. No scaling
* 4. Freesync is inactive
* 5. For single display cases, freesync must be disabled
*
* Return: True if pipe can be used for subvp, false otherwise
*/
bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe)
{
bool allow = false;
uint32_t refresh_rate = 0;
uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh;
uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh;
uint32_t min_refresh = subvp_max_refresh;
uint32_t i;
/* Only allow SubVP on high refresh displays if all connected displays
* are considered "high refresh" (i.e. >= 120hz). We do not want to
* allow combinations such as 120hz (SubVP) + 60hz (SubVP).
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->stream)
continue;
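/* Compute each stream's refresh rate (Hz, rounded up) and track the lowest one */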
refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 +
pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1)
/ (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total);
if (refresh_rate < min_refresh)
min_refresh = refresh_rate;
}
if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream &&
pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) {
for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
uint32_t width = subvp_high_refresh_list.res[i].width;
uint32_t height = subvp_high_refresh_list.res[i].height;
if (dcn32_check_native_scaling_for_res(pipe, width, height)) {
if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) {
allow = true;
break;
}
}
}
}
}
return allow;
}
/**
* dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
*
* @dc: Current DC state
* @context: New DC state to be programmed
*
* Return: Max vratio for prefetch
*/
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
{
double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
int i;
/* For single display MPO configs, allow the max vratio to be 8
* if any plane is YUV420 format
*/
if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
for (i = 0; i < context->stream_status[0].plane_count; i++) {
if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
max_vratio_pre = __DML_MAX_VRATIO_PRE__;
}
}
}
return max_vratio_pre;
}
/**
* dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for FPO + VActive case
*
* This function chooses the FPO candidate stream for FPO + VActive cases (2 stream config).
* For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0, and the
* other display has ActiveMargin <= 0. This function will choose the pipe/stream that has
* ActiveMargin <= 0 to be the FPO stream candidate if found.
*
* @dc: current dc state
* @context: new dc state
* @fpo_candidate_stream: pointer to FPO stream candidate if one is found
*
* Return: void
*/
void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream)
{
unsigned int i, pipe_idx;
const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
*fpo_candidate_stream = pipe->stream;
break;
}
pipe_idx++;
}
}
/**
* dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
*
* @dc: current dc state
* @context: new dc state
* @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found"
*
* Return: True if VACTIVE display is found, false otherwise
*/
bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req_us)
{
unsigned int i, pipe_idx;
const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vactive_found = false;
unsigned int blank_us = 0;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
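/* Vertical blanking time of this stream in microseconds */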
blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total /
(double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000;
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us &&
!(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) {
vactive_found = true;
break;
}
pipe_idx++;
}
return vactive_found;
}
void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
{
dc_assert_fp_enabled();
dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
}
void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
{
// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
dc->dml.soc.num_chans <= 8) {
int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
num_mclk_levels > 1) {
context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
}
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_32.h"
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_12) || (source_format == dm_420_8) || (source_format == dm_420_10)
|| (source_format == dm_rgbe_alpha))
ret_val = true;
return ret_val;
}
void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
bool dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
double stored_swath_l_bytes;
double stored_swath_c_bytes;
bool is_phantom_pipe;
uint32_t pixel_chunk_bytes = 0;
uint32_t min_pixel_chunk_bytes = 0;
uint32_t meta_chunk_bytes = 0;
uint32_t min_meta_chunk_bytes = 0;
uint32_t dpte_group_bytes = 0;
uint32_t mpte_group_bytes = 0;
uint32_t p1_pixel_chunk_bytes = 0;
uint32_t p1_min_pixel_chunk_bytes = 0;
uint32_t p1_meta_chunk_bytes = 0;
uint32_t p1_min_meta_chunk_bytes = 0;
uint32_t p1_dpte_group_bytes = 0;
uint32_t p1_mpte_group_bytes = 0;
unsigned int detile_buf_size_in_bytes;
unsigned int detile_buf_plane1_addr;
unsigned int pte_row_height_linear;
memset(rq_regs, 0, sizeof(*rq_regs));
dml_print("DML_DLG::%s: Calculation for pipe[%d] start, num_pipes=%d\n", __func__, pipe_idx, num_pipes);
pixel_chunk_bytes = get_pixel_chunk_size_in_kbyte(mode_lib, e2e_pipe_param, num_pipes) * 1024; // From VBA
min_pixel_chunk_bytes = get_min_pixel_chunk_size_in_byte(mode_lib, e2e_pipe_param, num_pipes); // From VBA
if (pixel_chunk_bytes == 64 * 1024)
min_pixel_chunk_bytes = 0;
meta_chunk_bytes = get_meta_chunk_size_in_kbyte(mode_lib, e2e_pipe_param, num_pipes) * 1024; // From VBA
min_meta_chunk_bytes = get_min_meta_chunk_size_in_byte(mode_lib, e2e_pipe_param, num_pipes); // From VBA
dpte_group_bytes = get_dpte_group_size_in_bytes(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
mpte_group_bytes = get_vm_group_size_in_bytes(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
p1_pixel_chunk_bytes = pixel_chunk_bytes;
p1_min_pixel_chunk_bytes = min_pixel_chunk_bytes;
p1_meta_chunk_bytes = meta_chunk_bytes;
p1_min_meta_chunk_bytes = min_meta_chunk_bytes;
p1_dpte_group_bytes = dpte_group_bytes;
p1_mpte_group_bytes = mpte_group_bytes;
if ((enum source_format_class) src->source_format == dm_rgbe_alpha)
p1_pixel_chunk_bytes = get_alpha_pixel_chunk_size_in_kbyte(mode_lib, e2e_pipe_param, num_pipes) * 1024;
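/* The chunk_size registers hold log2 of the chunk size in KB, i.e. dml_log2(bytes) - 10 */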
rq_regs->rq_regs_l.chunk_size = dml_log2(pixel_chunk_bytes) - 10;
rq_regs->rq_regs_c.chunk_size = dml_log2(p1_pixel_chunk_bytes) - 10;
if (min_pixel_chunk_bytes == 0)
rq_regs->rq_regs_l.min_chunk_size = 0;
else
rq_regs->rq_regs_l.min_chunk_size = dml_log2(min_pixel_chunk_bytes) - 8 + 1;
if (p1_min_pixel_chunk_bytes == 0)
rq_regs->rq_regs_c.min_chunk_size = 0;
else
rq_regs->rq_regs_c.min_chunk_size = dml_log2(p1_min_pixel_chunk_bytes) - 8 + 1;
rq_regs->rq_regs_l.meta_chunk_size = dml_log2(meta_chunk_bytes) - 10;
rq_regs->rq_regs_c.meta_chunk_size = dml_log2(p1_meta_chunk_bytes) - 10;
if (min_meta_chunk_bytes == 0)
rq_regs->rq_regs_l.min_meta_chunk_size = 0;
else
rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1;
if (p1_min_meta_chunk_bytes == 0)
rq_regs->rq_regs_c.min_meta_chunk_size = 0;
else
rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1;
rq_regs->rq_regs_l.dpte_group_size = dml_log2(dpte_group_bytes) - 6;
rq_regs->rq_regs_l.mpte_group_size = dml_log2(mpte_group_bytes) - 6;
rq_regs->rq_regs_c.dpte_group_size = dml_log2(p1_dpte_group_bytes) - 6;
rq_regs->rq_regs_c.mpte_group_size = dml_log2(p1_mpte_group_bytes) - 6;
detile_buf_size_in_bytes = get_det_buffer_size_kbytes(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * 1024;
detile_buf_plane1_addr = 0;
pte_row_height_linear = get_dpte_row_height_linear_l(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx);
if (src->sw_mode == dm_sw_linear)
ASSERT(pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(pte_row_height_linear), 1) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(p1_pte_row_height_linear), 1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(get_swath_height_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx));
rq_regs->rq_regs_c.swath_height = dml_log2(get_swath_height_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx));
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting pixel_chunk_bytes to 8kb anyways
if (pixel_chunk_bytes >= 32 * 1024 || (dual_plane && p1_pixel_chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
stored_swath_l_bytes = get_det_stored_buffer_size_l_bytes(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx);
stored_swath_c_bytes = get_det_stored_buffer_size_c_bytes(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx);
is_phantom_pipe = get_is_phantom_pipe(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
// Note: detile_buf_plane1_addr is in unit of 1KB
if (dual_plane) {
if (is_phantom_pipe) {
detile_buf_plane1_addr = ((1024.0 * 1024.0) / 2.0 / 1024.0); // half to chroma
} else {
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n",
__func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr =
dml_round_to_multiple(
(unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
1024, 0) / 1024.0; // 2/3 to luma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n",
__func__, detile_buf_plane1_addr);
#endif
}
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
dml_print("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
dml_print("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
dml_print("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
print__rq_regs_st(mode_lib, rq_regs);
dml_print("DML_DLG::%s: Calculation for pipe[%d] done, num_pipes=%d\n", __func__, pipe_idx, num_pipes);
}
void dml32_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
double refcyc_per_req_delivery_pre_cur0 = 0.;
double refcyc_per_req_delivery_cur0 = 0.;
double refcyc_per_req_delivery_pre_c = 0.;
double refcyc_per_req_delivery_c = 0.;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_l;
double refcyc_per_line_delivery_pre_c = 0.;
double refcyc_per_line_delivery_c = 0.;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_l;
double min_ttu_vblank;
double vratio_pre_l;
double vratio_pre_c;
unsigned int min_dst_y_next_start;
unsigned int htotal = dst->htotal;
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_end = dst->vblank_end;
bool interlaced = dst->interlaced;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
unsigned int vready_after_vcount0;
double refclk_freq_in_mhz = clks->refclk_mhz;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
bool dual_plane = false;
unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX];
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double max_dst_y_per_vm_vblank = 32.0;
double max_dst_y_per_row_vblank = 16.0;
double dst_y_per_pte_row_nom_l;
double dst_y_per_pte_row_nom_c;
double dst_y_per_meta_row_nom_l;
double dst_y_per_meta_row_nom_c;
double refcyc_per_pte_group_nom_l;
double refcyc_per_pte_group_nom_c;
double refcyc_per_pte_group_vblank_l;
double refcyc_per_pte_group_vblank_c;
double refcyc_per_pte_group_flip_l;
double refcyc_per_pte_group_flip_c;
double refcyc_per_meta_chunk_nom_l;
double refcyc_per_meta_chunk_nom_c;
double refcyc_per_meta_chunk_vblank_l;
double refcyc_per_meta_chunk_vblank_c;
double refcyc_per_meta_chunk_flip_l;
double refcyc_per_meta_chunk_flip_c;
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
dml_print("DML_DLG::%s: Calculation for pipe[%d] starts, num_pipes=%d\n", __func__, pipe_idx, num_pipes);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
ASSERT(ref_freq_to_pix_freq < 4.0);
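/* Program the ratios as fixed point: 19 fractional bits for ref_freq_to_pix_freq, 8 for refcyc_per_htotal */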
dlg_regs->ref_freq_to_pix_freq = (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal * dml_pow(2, 8));
dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
min_dst_y_next_start = get_min_dst_y_next_start(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, min_dst_y_next_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
vready_after_vcount0 = get_vready_at_or_after_vsync(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx); // From VBA
dlg_regs->vready_after_vcount0 = vready_after_vcount0;
dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, dlg_regs->vready_after_vcount0);
dst_x_after_scaler = dml_ceil(get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1);
dst_y_after_scaler = dml_ceil(get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1);
// do some adjustment on the dst_x/y_after_scaler values to account for ODM combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler);
dml_print("DML_DLG: %s: input dst_y_after_scaler = %d\n", __func__, dst_y_after_scaler);
// need to figure out which side of odm combine we're in
if (dst->odm_combine == dm_odm_combine_mode_2to1 || dst->odm_combine == dm_odm_combine_mode_4to1) {
// figure out which pipes go together
bool visited[DC__NUM_PIPES__MAX];
unsigned int i, j, k;
for (k = 0; k < num_pipes; ++k) {
visited[k] = false;
pipe_index_in_combine[k] = 0;
}
for (i = 0; i < num_pipes; i++) {
if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
unsigned int grp_idx = 0;
for (j = i; j < num_pipes; j++) {
if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
&& e2e_pipe_param[j].pipe.src.is_hsplit && !visited[j]) {
pipe_index_in_combine[j] = grp_idx;
dml_print("DML_DLG: %s: pipe[%d] is in grp %d idx %d\n",
__func__, j, grp, grp_idx);
grp_idx++;
visited[j] = true;
}
}
}
}
}
if (dst->odm_combine == dm_odm_combine_mode_disabled) {
// FIXME how about ODM split??
dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end * ref_freq_to_pix_freq);
} else {
if (dst->odm_combine == dm_odm_combine_mode_2to1 || dst->odm_combine == dm_odm_combine_mode_4to1) {
// TODO: We should really check that 4to1 is supported before setting it to 4
unsigned int odm_combine_factor = (dst->odm_combine == dm_odm_combine_mode_2to1 ? 2 : 4);
unsigned int odm_pipe_index = pipe_index_in_combine[pipe_idx];
dlg_regs->refcyc_h_blank_end = (unsigned int) (((double) hblank_end
+ odm_pipe_index * (double) dst->hactive / odm_combine_factor) * ref_freq_to_pix_freq);
}
}
ASSERT(dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
dml_print("DML_DLG: %s: htotal= %d\n", __func__, htotal);
dml_print("DML_DLG: %s: dst_x_after_scaler[%d]= %d\n", __func__, pipe_idx, dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler[%d] = %d\n", __func__, pipe_idx, dst_y_after_scaler);
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
// From VBA
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
// From VBA
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
// magic!
if (htotal <= 75) {
max_dst_y_per_vm_vblank = 100.0;
max_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dml_print("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
// Active
refcyc_per_line_delivery_pre_l = get_refcyc_per_line_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_line_delivery_l = get_refcyc_per_line_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, refcyc_per_line_delivery_l);
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_line_delivery_pre_c_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_line_delivery_c = get_refcyc_per_line_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
__func__, refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
__func__, refcyc_per_line_delivery_c);
}
if (src->dynamic_metadata_enable && src->gpuvm)
dlg_regs->refcyc_per_vm_dmdata = get_refcyc_per_vm_dmdata_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dlg_regs->dmdata_dl_delta = get_dmdata_dl_delta_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx)
* refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, refcyc_per_req_delivery_l);
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_req_delivery_pre_c_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_c = get_refcyc_per_req_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
__func__, refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, refcyc_per_req_delivery_c);
}
// TTU - Cursor
ASSERT(src->num_cursors <= 1);
if (src->num_cursors > 0) {
refcyc_per_req_delivery_pre_cur0 = get_refcyc_per_cursor_req_delivery_pre_in_us(mode_lib,
e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_cur0 = get_refcyc_per_cursor_req_delivery_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f\n",
__func__, refcyc_per_req_delivery_pre_cur0);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f\n",
__func__, refcyc_per_req_delivery_cur0);
}
// Assign to register structures
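/* The dst_y and vratio_prefetch fields below are programmed as fixed-point values
* (2 and 19 fractional bits respectively); the dml_pow(2, n) factors do the conversion. */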
dlg_regs->min_dst_y_next_start = min_dst_y_next_start * dml_pow(2, 2);
ASSERT(dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
dml_print("DML_DLG: %s: dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, dlg_regs->dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, dlg_regs->dst_y_per_row_flip);
dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
// From VBA
dst_y_per_pte_row_nom_l = get_dst_y_per_pte_row_nom_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
// From VBA
dst_y_per_pte_row_nom_c = get_dst_y_per_pte_row_nom_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
// From VBA
dst_y_per_meta_row_nom_l = get_dst_y_per_meta_row_nom_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
// From VBA
dst_y_per_meta_row_nom_c = get_dst_y_per_meta_row_nom_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
refcyc_per_pte_group_nom_l = get_refcyc_per_pte_group_nom_l_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_pte_group_nom_c = get_refcyc_per_pte_group_nom_c_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_pte_group_vblank_l = get_refcyc_per_pte_group_vblank_l_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_pte_group_vblank_c = get_refcyc_per_pte_group_vblank_c_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_pte_group_flip_l = get_refcyc_per_pte_group_flip_l_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_pte_group_flip_c = get_refcyc_per_pte_group_flip_c_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_nom_l = get_refcyc_per_meta_chunk_nom_l_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_nom_c = get_refcyc_per_meta_chunk_nom_c_in_us(mode_lib, e2e_pipe_param, num_pipes,
pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_vblank_l = get_refcyc_per_meta_chunk_vblank_l_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_vblank_c = get_refcyc_per_meta_chunk_vblank_c_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_flip_l = get_refcyc_per_meta_chunk_flip_l_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_meta_chunk_flip_c = get_refcyc_per_meta_chunk_flip_c_in_us(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
dlg_regs->dst_y_per_pte_row_nom_l = dst_y_per_pte_row_nom_l * dml_pow(2, 2);
dlg_regs->dst_y_per_pte_row_nom_c = dst_y_per_pte_row_nom_c * dml_pow(2, 2);
dlg_regs->dst_y_per_meta_row_nom_l = dst_y_per_meta_row_nom_l * dml_pow(2, 2);
dlg_regs->dst_y_per_meta_row_nom_c = dst_y_per_meta_row_nom_c * dml_pow(2, 2);
dlg_regs->refcyc_per_pte_group_nom_l = refcyc_per_pte_group_nom_l;
dlg_regs->refcyc_per_pte_group_nom_c = refcyc_per_pte_group_nom_c;
dlg_regs->refcyc_per_pte_group_vblank_l = refcyc_per_pte_group_vblank_l;
dlg_regs->refcyc_per_pte_group_vblank_c = refcyc_per_pte_group_vblank_c;
dlg_regs->refcyc_per_pte_group_flip_l = refcyc_per_pte_group_flip_l;
dlg_regs->refcyc_per_pte_group_flip_c = refcyc_per_pte_group_flip_c;
dlg_regs->refcyc_per_meta_chunk_nom_l = refcyc_per_meta_chunk_nom_l;
dlg_regs->refcyc_per_meta_chunk_nom_c = refcyc_per_meta_chunk_nom_c;
dlg_regs->refcyc_per_meta_chunk_vblank_l = refcyc_per_meta_chunk_vblank_l;
dlg_regs->refcyc_per_meta_chunk_vblank_c = refcyc_per_meta_chunk_vblank_c;
dlg_regs->refcyc_per_meta_chunk_flip_l = refcyc_per_meta_chunk_flip_l;
dlg_regs->refcyc_per_meta_chunk_flip_c = refcyc_per_meta_chunk_flip_c;
dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l, 1);
dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l, 1);
dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c, 1);
dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c, 1);
dlg_regs->chunk_hdl_adjust_cur0 = 3;
dlg_regs->dst_y_offset_cur0 = 0;
dlg_regs->chunk_hdl_adjust_cur1 = 3;
dlg_regs->dst_y_offset_cur1 = 0;
dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
ttu_regs->refcyc_per_req_delivery_pre_cur1 = 0;
ttu_regs->refcyc_per_req_delivery_cur1 = 0;
ttu_regs->qos_level_low_wm = 0;
ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal * ref_freq_to_pix_freq);
ttu_regs->qos_level_flip = 14;
ttu_regs->qos_level_fixed_l = 8;
ttu_regs->qos_level_fixed_c = 8;
ttu_regs->qos_level_fixed_cur0 = 8;
ttu_regs->qos_ramp_disable_l = 0;
ttu_regs->qos_ramp_disable_c = 0;
ttu_regs->qos_ramp_disable_cur0 = 0;
ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
// CHECK for HW registers' range, assert or clamp
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
if (dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
if (dlg_regs->refcyc_per_vm_group_flip >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
if (dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
if (dlg_regs->refcyc_per_vm_req_flip >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
ASSERT(dlg_regs->dst_y_after_scaler < (unsigned int) 8);
ASSERT(dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
if (dual_plane) {
if (dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
// FIXME what is so special about chroma, can we just assert?
dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u > register max U15.2 %u\n",
__func__, dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)dml_pow(2, 17) - 1);
}
}
ASSERT(dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
ASSERT(dlg_regs->dst_y_per_meta_row_nom_c < (unsigned int)dml_pow(2, 17));
if (dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
if (dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
}
ASSERT(dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
ASSERT(dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)dml_pow(2, 13));
}
if (dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
if (dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
ASSERT(dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->refcyc_per_meta_chunk_vblank_c < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
ASSERT(dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
ASSERT(ttu_regs->qos_level_low_wm < dml_pow(2, 14));
ASSERT(ttu_regs->qos_level_high_wm < dml_pow(2, 14));
ASSERT(ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, ttu_regs);
print__dlg_regs_st(mode_lib, dlg_regs);
dml_print("DML_DLG::%s: Calculation for pipe[%d] done, num_pipes=%d\n", __func__, pipe_idx, num_pipes);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "link.h"
#include "dcn20_fpu.h"
#define DC_LOGGER_INIT(logger)
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
/* Constant */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
/**
* DOC: DCN2x FPU manipulation Overview
*
* The DCN architecture relies on FPU operations, which require special
* compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
* want to avoid spreading FPU access across multiple files. With this idea in
* mind, this file aims to centralize all DCN20 and DCN2.1 (DCN2x) functions
* that require FPU access in a single place. Code in this file follows the
* following code pattern:
*
* 1. Functions that use FPU operations should be isolated in static functions.
* 2. The FPU functions should have the noinline attribute to ensure anything
* that deals with FP registers is contained within this call.
* 3. Any function that needs to be accessed outside this file requires a
* public interface that does not use any FPU references.
* 4. Developers **must not** use DC_FP_START/END in this file, but they need
* to ensure that the caller invokes it before accessing any function available
* in this file. For this reason, public functions in this file must invoke
* dc_assert_fp_enabled();
*
* Let's expand on the idea behind this code pattern a little more. To fully
* isolate FPU operations in a single place, we must avoid situations where
* compilers spill FP values to registers due to FP being enabled in a specific
* C file. Note that even if we isolate all FPU functions in a single file and
* call its interface from other files, the compiler might enable the use of
* FPU before we call DC_FP_START. Nevertheless, it is the programmer's
* responsibility to invoke DC_FP_START/END in the correct place. To highlight
* situations where developers forgot to use the FP protection before calling
* the DC FPU interface functions, we introduce a helper that checks if the
* function is invoked under FP protection. If not, it will trigger a kernel
* warning.
*/
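/*
* Illustrative sketch only (not part of the driver): callers outside this file
* are expected to wrap the public interfaces declared in dcn20_fpu.h with the
* DC_FP_START()/DC_FP_END() pair, roughly like:
*
*	DC_FP_START();
*	dcn20_fpu_update_something(dc, context);  // hypothetical helper name
*	DC_FP_END();
*
* dc_assert_fp_enabled() inside the public helpers emits a kernel warning if a
* caller forgets this protection.
*/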
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 6,
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 12,
.writeback_max_vscl_taps = 12,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 6,
.max_num_dpp = 6,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 8,
.max_vscl_ratio = 8,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 32, //
.dppclk_delay_subtotal = 77, //
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 8,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 87, //
.dcfclk_cstate_latency = 10, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.num_dsc = 5,
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
.dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 4,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 12,
.writeback_max_vscl_taps = 12,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 5,
.max_num_dpp = 5,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 8,
.max_vscl_ratio = 8,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 32, //
.dppclk_delay_subtotal = 77, //
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 8,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 87, //
.dcfclk_cstate_latency = 10, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.ptoi_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
/* Defaults that get patched on driver load from firmware. */
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 560.0,
.fabricclk_mhz = 560.0,
.dispclk_mhz = 513.0,
.dppclk_mhz = 513.0,
.phyclk_mhz = 540.0,
.socclk_mhz = 560.0,
.dscclk_mhz = 171.0,
.dram_speed_mts = 8960.0,
},
{
.state = 1,
.dcfclk_mhz = 694.0,
.fabricclk_mhz = 694.0,
.dispclk_mhz = 642.0,
.dppclk_mhz = 642.0,
.phyclk_mhz = 600.0,
.socclk_mhz = 694.0,
.dscclk_mhz = 214.0,
.dram_speed_mts = 11104.0,
},
{
.state = 2,
.dcfclk_mhz = 875.0,
.fabricclk_mhz = 875.0,
.dispclk_mhz = 734.0,
.dppclk_mhz = 734.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 875.0,
.dscclk_mhz = 245.0,
.dram_speed_mts = 14000.0,
},
{
.state = 3,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 1000.0,
.dispclk_mhz = 1100.0,
.dppclk_mhz = 1100.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1000.0,
.dscclk_mhz = 367.0,
.dram_speed_mts = 16000.0,
},
{
.state = 4,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 5,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
},
.num_states = 5,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 40.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 40.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 131,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 16,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404.0,
.dummy_pstate_latency_us = 5.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3850,
.xfc_bus_transport_time_us = 20,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 0
};
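/*
 * Navi14 SoC bounding box: same clock states as the base DCN 2.0 table,
 * but with 8 memory channels and longer SR exit/enter+exit times.
 */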
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 560.0,
.fabricclk_mhz = 560.0,
.dispclk_mhz = 513.0,
.dppclk_mhz = 513.0,
.phyclk_mhz = 540.0,
.socclk_mhz = 560.0,
.dscclk_mhz = 171.0,
.dram_speed_mts = 8960.0,
},
{
.state = 1,
.dcfclk_mhz = 694.0,
.fabricclk_mhz = 694.0,
.dispclk_mhz = 642.0,
.dppclk_mhz = 642.0,
.phyclk_mhz = 600.0,
.socclk_mhz = 694.0,
.dscclk_mhz = 214.0,
.dram_speed_mts = 11104.0,
},
{
.state = 2,
.dcfclk_mhz = 875.0,
.fabricclk_mhz = 875.0,
.dispclk_mhz = 734.0,
.dppclk_mhz = 734.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 875.0,
.dscclk_mhz = 245.0,
.dram_speed_mts = 14000.0,
},
{
.state = 3,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 1000.0,
.dispclk_mhz = 1100.0,
.dppclk_mhz = 1100.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1000.0,
.dscclk_mhz = 367.0,
.dram_speed_mts = 16000.0,
},
{
.state = 4,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 5,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
},
.num_states = 5,
.sr_exit_time_us = 11.6,
.sr_enter_plus_exit_time_us = 13.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 40.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 40.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 131,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 8,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404.0,
.dummy_pstate_latency_us = 5.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3850,
.xfc_bus_transport_time_us = 20,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 0
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
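/*
 * DCN 2.1 IP parameters (Renoir-class APUs): 4 OTG/DPP instances,
 * GPUVM and HOSTVM enabled, XFC not supported.
 */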
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
.num_dsc = 3,
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 44,
.dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 4,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 12,
.writeback_max_vscl_taps = 12,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 4,
.max_num_dpp = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 32, //
.dppclk_delay_subtotal = 77, //
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 8,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 87, //
.dcfclk_cstate_latency = 10, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.ptoi_supported = 0,
.number_of_cursors = 1,
};
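/*
 * DCN 2.1 SoC bounding box: eight clock states plus an extra copy of the
 * max state that does not require dispclk ramping.
 */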
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 400.0,
.fabricclk_mhz = 400.0,
.dispclk_mhz = 600.0,
.dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
.dram_speed_mts = 1600.0,
},
{
.state = 1,
.dcfclk_mhz = 464.52,
.fabricclk_mhz = 800.0,
.dispclk_mhz = 654.55,
.dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
.dram_speed_mts = 1600.0,
},
{
.state = 2,
.dcfclk_mhz = 514.29,
.fabricclk_mhz = 933.0,
.dispclk_mhz = 757.89,
.dppclk_mhz = 685.71,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
.dram_speed_mts = 1866.0,
},
{
.state = 3,
.dcfclk_mhz = 576.00,
.fabricclk_mhz = 1067.0,
.dispclk_mhz = 847.06,
.dppclk_mhz = 757.89,
.phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
.dram_speed_mts = 2134.0,
},
{
.state = 4,
.dcfclk_mhz = 626.09,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 900.00,
.dppclk_mhz = 847.06,
.phyclk_mhz = 810.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 300.0,
.dram_speed_mts = 2400.0,
},
{
.state = 5,
.dcfclk_mhz = 685.71,
.fabricclk_mhz = 1333.0,
.dispclk_mhz = 1028.57,
.dppclk_mhz = 960.00,
.phyclk_mhz = 810.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 342.86,
.dram_speed_mts = 2666.0,
},
{
.state = 6,
.dcfclk_mhz = 757.89,
.fabricclk_mhz = 1467.0,
.dispclk_mhz = 1107.69,
.dppclk_mhz = 1028.57,
.phyclk_mhz = 810.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 369.23,
.dram_speed_mts = 3200.0,
},
{
.state = 7,
.dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 8,
.dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 100.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
.num_states = 8
};
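/*
 * The watermark tables below are selected at runtime by memory type
 * (DDR4 vs LPDDR4) and ASIC; the _gs and _rn suffixes appear to
 * distinguish Green Sardine and Renoir variants (assumption based on
 * the naming). All entries use WM_TYPE_PSTATE_CHG with per-set SR times.
 */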
struct wm_table ddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 7.09,
.sr_enter_plus_exit_time_us = 8.14,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
}
};
struct wm_table lpddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 5.32,
.sr_enter_plus_exit_time_us = 6.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.82,
.sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.89,
.sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.748,
.sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
};
struct wm_table lpddr4_wm_table_with_disabled_ppt = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 8.32,
.sr_enter_plus_exit_time_us = 9.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.82,
.sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.89,
.sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.748,
.sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
};
struct wm_table ddr4_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 11.90,
.sr_enter_plus_exit_time_us = 12.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
}
};
struct wm_table ddr4_1R_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
}
};
struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 7.32,
.sr_enter_plus_exit_time_us = 8.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.82,
.sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.89,
.sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 9.748,
.sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
};
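/*
 * Fill the DML writeback (dout.wb) parameters for every pipe with an
 * active stream from the stream's first writeback_info entry.
 */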
void dcn20_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i;
dc_assert_fp_enabled();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_writeback_info *wb_info;
if (!res_ctx->pipe_ctx[i].stream)
continue;
/* only take the writeback_info address once the stream is known to be non-NULL */
wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
/* Set writeback information */
pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
pipes[pipe_cnt].dout.num_active_wb++;
pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
else
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
} else {
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
}
pipe_cnt++;
}
}
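/*
 * Program MCIF_WB arbitration: every client watermark gets the DML
 * writeback urgent watermark and every p-state watermark the writeback
 * DRAM-clock-change watermark (both scaled by 1000), plus the per-pixel
 * time derived from the stream's pixel clock.
 */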
void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int i)
{
int k;
dc_assert_fp_enabled();
for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
}
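/* DTBCLK is required whenever any active pipe drives a 128b/132b (DP 2.x) signal. */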
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
}
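/*
 * Decide Z8/Z9/Z10 support for the new state from the plane count, the
 * eDP power sequencer index, PSR settings and the DML stutter period;
 * the allowed and disallowed cases are listed in the comment below.
 */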
static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context)
{
int plane_count;
int i;
unsigned int min_dst_y_next_start_us;
plane_count = 0;
min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
/*
* Z9 and Z10 allowed cases:
* 1. 0 Planes enabled
* 2. single eDP, on link 0, 1 plane and stutter period > 5ms
* Z10 only cases:
* 1. single eDP, on link 0, 1 plane and stutter period >= 5ms
* Z8 cases:
* 1. stutter period sufficient
* Zstate not allowed cases:
* 1. Everything else
*/
if (plane_count == 0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
struct dc_stream_state *current_stream = context->streams[0];
int minimum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
bool isFreesyncVideo;
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
break;
}
}
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else {
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
}
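/*
 * Compute a minimum VSTARTUP from the stream timing (blank end plus
 * front porch, with the front porch clamped to at least 1, or 2 when
 * interlaced) and raise *vstartup_start if it is currently lower.
 */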
static void dcn20_adjust_freesync_v_startup(
const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end = 0;
uint32_t asic_blank_start = 0;
uint32_t newVstartup = 0;
patched_crtc_timing = *dc_crtc_timing;
if (patched_crtc_timing.flags.INTERLACE == 1) {
if (patched_crtc_timing.v_front_porch < 2)
patched_crtc_timing.v_front_porch = 2;
} else {
if (patched_crtc_timing.v_front_porch < 1)
patched_crtc_timing.v_front_porch = 1;
}
/* blank_start = frame end - front porch */
asic_blank_start = patched_crtc_timing.v_total -
patched_crtc_timing.v_front_porch;
/* blank_end = blank_start - active */
asic_blank_end = asic_blank_start -
patched_crtc_timing.v_border_bottom -
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}
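/*
 * Copy the DML/VBA clock results into the bandwidth context, fill the
 * per-pipe vstartup/vupdate/vready and DET parameters, derive the DLG,
 * TTU and RQ register sets for each active pipe and finally decide
 * z-state support for the new state.
 */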
void dcn20_calculate_dlg_params(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
int i, pipe_idx, active_hubp_count = 0;
dc_assert_fp_enabled();
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
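/*
 * DML reports DRAMSpeed in MT/s; convert back to a memory clock in kHz.
 * This is the inverse of the *16/1000 scaling used when the bounding box
 * is built (see dcn20_update_bounding_box below).
 */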
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz)
context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
/* Pstate change might not be supported by hardware, but it might be
* possible with firmware driven vertical blank stretching.
*/
context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
} else {
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
}
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
}
/* save an original copy of dppclk */
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
- context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
/* cstate disabled on 201 */
if (dc->ctx->dce_version == DCN_VERSION_2_01)
cstate_en = false;
context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].dlg_regs,
&context->res_ctx.pipe_ctx[i].ttu_regs,
pipes,
pipe_cnt,
pipe_idx,
cstate_en,
context->bw_ctx.bw.dcn.clk.p_state_change_support,
false, false, true);
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
&pipes[pipe_idx].pipe);
pipe_idx++;
}
context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
}
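/* Translate a DC swizzle_mode_values enum into the matching DML dm_sw_* value. */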
static void swizzle_to_dml_params(
enum swizzle_mode_values swizzle,
unsigned int *sw_mode)
{
switch (swizzle) {
case DC_SW_LINEAR:
*sw_mode = dm_sw_linear;
break;
case DC_SW_4KB_S:
*sw_mode = dm_sw_4kb_s;
break;
case DC_SW_4KB_S_X:
*sw_mode = dm_sw_4kb_s_x;
break;
case DC_SW_4KB_D:
*sw_mode = dm_sw_4kb_d;
break;
case DC_SW_4KB_D_X:
*sw_mode = dm_sw_4kb_d_x;
break;
case DC_SW_64KB_S:
*sw_mode = dm_sw_64kb_s;
break;
case DC_SW_64KB_S_X:
*sw_mode = dm_sw_64kb_s_x;
break;
case DC_SW_64KB_S_T:
*sw_mode = dm_sw_64kb_s_t;
break;
case DC_SW_64KB_D:
*sw_mode = dm_sw_64kb_d;
break;
case DC_SW_64KB_D_X:
*sw_mode = dm_sw_64kb_d_x;
break;
case DC_SW_64KB_D_T:
*sw_mode = dm_sw_64kb_d_t;
break;
case DC_SW_64KB_R_X:
*sw_mode = dm_sw_64kb_r_x;
break;
case DC_SW_VAR_S:
*sw_mode = dm_sw_var_s;
break;
case DC_SW_VAR_S_X:
*sw_mode = dm_sw_var_s_x;
break;
case DC_SW_VAR_D:
*sw_mode = dm_sw_var_d;
break;
case DC_SW_VAR_D_X:
*sw_mode = dm_sw_var_d_x;
break;
case DC_SW_VAR_R_X:
*sw_mode = dm_sw_var_r_x;
break;
default:
ASSERT(0); /* Not supported */
break;
}
}
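/*
 * Build the DML pipe array from the resource context: determine whether
 * all vblanks are synchronizable, then fill timing, output type/format,
 * ODM/MPC split grouping and, when a plane is attached, the viewport,
 * surface, scaler and swizzle parameters. Returns the number of
 * populated pipes.
 */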
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
struct resource_context *res_ctx = &context->res_ctx;
dc_assert_fp_enabled();
for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
if (pipe_cnt < 0) {
pipe_cnt = i;
continue;
}
if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
continue;
if (dc->debug.disable_timing_sync ||
(!resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream) &&
!resource_are_vblanks_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream))) {
synchronized_vblank = false;
break;
}
}
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
unsigned int v_total;
unsigned int front_porch;
int output_bpc;
struct audio_check aud_check = {0};
if (!res_ctx->pipe_ctx[i].stream)
continue;
v_total = timing->v_total;
front_porch = timing->v_front_porch;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
pipes[pipe_cnt].pipe.src.vm = 0;*/
pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
pipes[pipe_cnt].pipe.dest.use_maximum_vstartup = dc->ctx->dce_version == DCN_VERSION_2_01;
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
/* 1/2 vblank */
pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
(v_total - timing->v_addressable
- timing->v_border_top - timing->v_border_bottom) / 2;
/* 36 bytes dp, 32 hdmi */
pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
}
pipes[pipe_cnt].pipe.src.dcc = false;
pipes[pipe_cnt].pipe.src.dcc_rate = 1;
pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
pipes[pipe_cnt].pipe.dest.synchronize_timings = synchronized_vblank;
pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive =
timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipes[pipe_cnt].pipe.dest.vactive =
timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_na;
pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
switch (resource_get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
case 3:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_4to1;
break;
default:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
}
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
== res_ctx->pipe_ctx[i].plane_state) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
int split_idx = 0;
while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
== res_ctx->pipe_ctx[i].plane_state) {
first_pipe = first_pipe->top_pipe;
split_idx++;
}
/* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */
if (split_idx == 0)
pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
else if (split_idx == 1)
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
else if (split_idx == 2)
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
} else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
while (first_pipe->prev_odm_pipe)
first_pipe = first_pipe->prev_odm_pipe;
pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
}
switch (res_ctx->pipe_ctx[i].stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
if (dc->link_srv->dp_is_128b_132b_signal(&res_ctx->pipe_ctx[i]))
pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
pipes[pipe_cnt].dout.output_type = dm_edp;
break;
case SIGNAL_TYPE_HDMI_TYPE_A:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
pipes[pipe_cnt].dout.output_type = dm_hdmi;
break;
default:
/* In case there is no signal, set dp with 4 lanes to allow max config */
pipes[pipe_cnt].dout.is_virtual = 1;
pipes[pipe_cnt].dout.output_type = dm_dp;
pipes[pipe_cnt].dout.dp_lanes = 4;
}
switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
case COLOR_DEPTH_666:
output_bpc = 6;
break;
case COLOR_DEPTH_888:
output_bpc = 8;
break;
case COLOR_DEPTH_101010:
output_bpc = 10;
break;
case COLOR_DEPTH_121212:
output_bpc = 12;
break;
case COLOR_DEPTH_141414:
output_bpc = 14;
break;
case COLOR_DEPTH_161616:
output_bpc = 16;
break;
case COLOR_DEPTH_999:
output_bpc = 9;
break;
case COLOR_DEPTH_111111:
output_bpc = 11;
break;
default:
output_bpc = 8;
break;
}
switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
pipes[pipe_cnt].dout.output_format = dm_444;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC &&
!res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple)
pipes[pipe_cnt].dout.output_format = dm_n422;
else
pipes[pipe_cnt].dout.output_format = dm_s422;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
break;
default:
pipes[pipe_cnt].dout.output_format = dm_444;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.dsc_input_bpc = 12;
/*fill up the audio sample rate (unit in kHz)*/
get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
/*
 * Graphics planes use the full cursor count; video (NV12) and SubVP
 * phantom planes report 0 cursors, since cursor on/off affects the
 * bandwidth calculations.
 */
if (res_ctx->pipe_ctx[i].plane_state &&
(res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
pipes[pipe_cnt].pipe.src.num_cursors = 0;
else
pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
if (!res_ctx->pipe_ctx[i].plane_state) {
pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.source_rotation = dm_rotation_0;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
pipes[pipe_cnt].pipe.src.viewport_width = 1920;
pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/
pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
pipes[pipe_cnt].pipe.src.viewport_width /= 2;
pipes[pipe_cnt].pipe.dest.recout_width /= 2;
} else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) {
pipes[pipe_cnt].pipe.src.viewport_width /= 4;
pipes[pipe_cnt].pipe.dest.recout_width /= 4;
}
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
|| (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
/* stereo is not split */
if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
pipes[pipe_cnt].pipe.src.is_hsplit = false;
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
}
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
switch (pln->rotation) {
case ROTATION_ANGLE_0:
pipes[pipe_cnt].pipe.src.source_rotation = dm_rotation_0;
break;
case ROTATION_ANGLE_90:
pipes[pipe_cnt].pipe.src.source_rotation = dm_rotation_90;
break;
case ROTATION_ANGLE_180:
pipes[pipe_cnt].pipe.src.source_rotation = dm_rotation_180;
break;
case ROTATION_ANGLE_270:
pipes[pipe_cnt].pipe.src.source_rotation = dm_rotation_270;
break;
default:
break;
}
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
pipes[pipe_cnt].pipe.src.viewport_x_y = scl->viewport.x;
pipes[pipe_cnt].pipe.src.viewport_x_c = scl->viewport_c.x;
pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width;
pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height;
pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
|| pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
} else {
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
}
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
pipes[pipe_cnt].pipe.dest.full_recout_width *= 4;
else {
struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
while (split_pipe && split_pipe->plane_state == pln) {
pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
split_pipe = split_pipe->bottom_pipe;
}
split_pipe = res_ctx->pipe_ctx[i].top_pipe;
while (split_pipe && split_pipe->plane_state == pln) {
pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
split_pipe = split_pipe->top_pipe;
}
}
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
scl->ratios.vert.value != dc_fixpt_one.value
|| scl->ratios.horz.value != dc_fixpt_one.value
|| scl->ratios.vert_c.value != dc_fixpt_one.value
|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
|| dc->debug.always_scale; /*support always scale*/
pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
pipes[pipe_cnt].pipe.src.macro_tile_size =
swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
&pipes[pipe_cnt].pipe.src.sw_mode);
switch (pln->format) {
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
break;
case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
pipes[pipe_cnt].pipe.src.source_format = dm_rgbe_alpha;
break;
default:
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
break;
}
}
pipe_cnt++;
}
/* populate writeback information */
dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
return pipe_cnt;
}
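/*
 * Fill the per-pipe clock configs (honouring forced and minimum clock
 * debug overrides), repopulate the pipe array if the counts diverge,
 * then compute watermark sets b, c and d with the voltage floored at
 * states 1, 2 and 3 respectively, and set a at the validated level.
 */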
void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
bool fast_validate)
{
int pipe_cnt, i, pipe_idx;
dc_assert_fp_enabled();
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
if (pipe_split_from[i] < 0) {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
} else {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
if (dc->config.forced_clocks) {
pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_cnt++;
}
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
context, pipes, fast_validate);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
context, pipes, fast_validate);
}
*out_pipe_cnt = pipe_cnt;
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
/* only pipe 0 is read for voltage and dcf/soc clocks */
if (vlevel < 1) {
pipes[0].clks_cfg.voltage = 1;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
}
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
}
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
}
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
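/*
 * Rebuild bb->clock_limits from the SMU-provided UCLK states and max
 * clocks: DRAM speed is uclk * 16 / 1000, fabric clock honours the
 * 1.08 FCLK:UCLK ratio (floored at min_dcfclk), SOC/DCF clocks are
 * capped at the reported maximums and the last state is duplicated
 * because DML expects an extra state identical to the max state.
 */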
void dcn20_update_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks,
unsigned int *uclk_states,
unsigned int num_states)
{
int num_calculated_states = 0;
int min_dcfclk = 0;
int i;
dc_assert_fp_enabled();
if (num_states == 0)
return;
memset(bb->clock_limits, 0, sizeof(bb->clock_limits));
if (dc->bb_overrides.min_dcfclk_mhz > 0) {
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
} else {
if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
min_dcfclk = 310;
else
// Accounting for SOC/DCF relationship, we can go as high as
// 506 MHz in Vmin.
min_dcfclk = 506;
}
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
bb->clock_limits[i].state = i;
bb->clock_limits[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
// FCLK:UCLK ratio is 1.08
min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080,
1000000);
bb->clock_limits[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;
bb->clock_limits[i].socclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
max_clocks->socClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;
bb->clock_limits[i].dcfclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
max_clocks->dcfClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;
bb->clock_limits[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
bb->clock_limits[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
bb->clock_limits[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
bb->clock_limits[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
num_calculated_states++;
}
bb->clock_limits[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
bb->clock_limits[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
bb->clock_limits[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;
bb->num_states = num_calculated_states;
// Duplicate the last state: DML always expects an extra state identical to the max state to work
memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
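/*
 * Clamp every bounding-box state to the SMU-reported maximum clocks,
 * then trim duplicate states from the end of the table.
 */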
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks)
{
int i;
dc_assert_fp_enabled();
// First pass - cap all clocks higher than the reported max
for (i = 0; i < bb->num_states; i++) {
if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
&& max_clocks.dcfClockInKhz != 0)
bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
&& max_clocks.uClockInKhz != 0)
bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
&& max_clocks.fabricClockInKhz != 0)
bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
&& max_clocks.displayClockInKhz != 0)
bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
&& max_clocks.dppClockInKhz != 0)
bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
&& max_clocks.phyClockInKhz != 0)
bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
&& max_clocks.socClockInKhz != 0)
bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
&& max_clocks.dscClockInKhz != 0)
bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
}
// Second pass - remove all duplicate clock states
for (i = bb->num_states - 1; i > 1; i--) {
bool duplicate = true;
if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
duplicate = false;
if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
duplicate = false;
if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
duplicate = false;
if (duplicate)
bb->num_states--;
}
}
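/*
 * Apply the dc->bb_overrides latencies (given in ns) to the bounding box
 * (stored in us) wherever an override is set and differs from the
 * current value.
 */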
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
dc_assert_fp_enabled();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}
if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
bb->sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
if ((int)(bb->sr_exit_z8_time_us * 1000)
!= dc->bb_overrides.sr_exit_z8_time_ns
&& dc->bb_overrides.sr_exit_z8_time_ns) {
bb->sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
}
if ((int)(bb->sr_enter_plus_exit_z8_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_z8_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) {
bb->sr_enter_plus_exit_z8_time_us = dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
}
if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(bb->dram_clock_change_latency_us * 1000)
!= dc->bb_overrides.dram_clock_change_latency_ns
&& dc->bb_overrides.dram_clock_change_latency_ns) {
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
if ((int)(bb->dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
bb->dummy_pstate_latency_us =
dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
}
}
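/*
* Illustrative sketch (not part of the driver, override value hypothetical):
* the bb_overrides fields are in nanoseconds while the bounding box stores
* microseconds, so a nonzero override is applied as ns / 1000.0:
*
*	dc->bb_overrides.sr_exit_time_ns = 8800;
*	dcn20_patch_bounding_box(dc, &soc_bb);
*	// soc_bb.sr_exit_time_us is now 8.8
*
* The (int)(us * 1000) != ns comparison simply skips the write when the
* bounding box already matches the requested override.
*/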
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool out = false;
BW_VAL_TRACE_SETUP();
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
/* bail out early if the pipe array could not be allocated */
if (!pipes)
return false;
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
if (pipe_cnt == 0)
goto validate_out;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
validate_fail:
DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
bool dcn20_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
double p_state_latency_us;
dc_assert_fp_enabled();
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
dc->debug.enable_dram_clock_change_one_display_vactive;
/* Unsafe due to current pipe merge and split logic */
ASSERT(context != dc->current_state);
if (fast_validate) {
return dcn20_validate_bandwidth_internal(dc, context, true);
}
// Best case, we support full UCLK switch latency
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
(voltage_supported && full_pstate_supported)) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
goto restore_dml_state;
}
// Fallback: Try to only support G6 temperature read latency
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
goto restore_dml_state;
}
// ERROR: fallback is supposed to always work.
ASSERT(false);
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
}
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
dc_assert_fp_enabled();
ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
}
void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int vlevel,
int max_mpc_comb,
int pipe_idx,
bool is_validating_bw)
{
dc_assert_fp_enabled();
if (is_validating_bw)
v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
else
v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
}
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
pipes[i].pipe.src.gpuvm = 1;
}
return pipe_cnt;
}
static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
int i;
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
dc->bb_overrides.sr_exit_time_ns / 1000.0;
}
}
if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
}
if (dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if (dc->bb_overrides.dram_clock_change_latency_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
}
}
static void calculate_wm_set_for_vlevel(int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
bool fast_validate)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
ASSERT(bw_params);
patch_bounding_box(dc, &context->bw_ctx.dml.soc);
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb];
if (pipe_split_from[i] < 0) {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
} else {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
pipe_cnt++;
}
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
context, pipes, fast_validate);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
context, pipes, fast_validate);
}
*out_pipe_cnt = pipe_cnt;
vlevel_max = bw_params->clk_table.num_entries - 1;
/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = MIN(MAX(vlevel_req, 3), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = MIN(MAX(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = MIN(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);
}
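/*
* Worked example of the watermark-set selection above (numbers hypothetical):
* with vlevel_max = 3 and vlevel_req = 1 the four sets resolve to
*
*	WM_A: MIN(1, 3)         = 1
*	WM_B: MIN(MAX(1, 2), 3) = 2
*	WM_C: MIN(MAX(1, 3), 3) = 3
*	WM_D: 0 if the D entry is WM_TYPE_RETRAINING (LPDDR retraining case),
*	      otherwise vlevel_max = 3
*
* i.e. sets B and C are evaluated at progressively higher minimum voltage
* levels even when the requested level is low.
*/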
bool dcn21_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool out = false;
BW_VAL_TRACE_SETUP();
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
/* bail out early if the pipe array could not be allocated */
if (!pipes)
return false;
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
dc_assert_fp_enabled();
/* Unsafe due to current pipe merge and split logic */
ASSERT(context != dc->current_state);
out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
if (pipe_cnt == 0)
goto validate_out;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
validate_fail:
DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{
struct _vcs_dpi_voltage_scaling_st low_pstate_lvl;
int i;
low_pstate_lvl.state = 1;
low_pstate_lvl.dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
low_pstate_lvl.fabricclk_mhz = clk_table->entries[0].fclk_mhz;
low_pstate_lvl.socclk_mhz = clk_table->entries[0].socclk_mhz;
low_pstate_lvl.dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
low_pstate_lvl.dispclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dispclk_mhz;
low_pstate_lvl.dppclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dppclk_mhz;
low_pstate_lvl.dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[high_voltage_lvl].dram_bw_per_chan_gbps;
low_pstate_lvl.dscclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dscclk_mhz;
low_pstate_lvl.dtbclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dtbclk_mhz;
low_pstate_lvl.phyclk_d18_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_d18_mhz;
low_pstate_lvl.phyclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_mhz;
for (i = clk_table->num_entries; i > 1; i--)
clk_table->entries[i] = clk_table->entries[i-1];
clk_table->entries[1] = clk_table->entries[0];
clk_table->num_entries++;
return low_pstate_lvl;
}
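/*
* Sketch of the clk_table reshuffle done above (entry values hypothetical):
* with num_entries = 3 the entries are shifted up by one slot, entry 0 is
* duplicated into slot 1, and num_entries becomes 4:
*
*	before: [E0, E1, E2]
*	after:  [E0, E0, E1, E2]
*
* Slot 1 of dcn2_1_soc.clock_limits is then overwritten by the returned
* low_pstate_lvl, which pairs entry 0's dcfclk/fclk/socclk/memclk with the
* dispclk/dppclk/phyclk limits taken from the closest existing
* high-voltage level (high_voltage_lvl).
*/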
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl = 0, k = 0;
int j;
dc_assert_fp_enabled();
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
memcpy(s, dcn2_1_soc.clock_limits, sizeof(dcn2_1_soc.clock_limits));
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards */
for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
/* clk_table[1] is reserved for min DF PState. skip here to fill in later. */
if (i == 1)
k++;
s[k].state = k;
s[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
s[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
s[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
s[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
s[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
s[k].dram_bw_per_chan_gbps =
dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
s[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
s[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
s[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++;
}
memcpy(&dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
/* fill in min DF PState */
dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl);
/* duplicate last level */
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
}
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params)
{
dc_assert_fp_enabled();
bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
bw_params->wm_table.entries[WM_D].valid = true;
}
void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
struct writeback_st dout_wb;
dc_assert_fp_enabled();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
if (!stream)
continue;
max_calc_writeback_dispclk = 0;
/* Set writeback information */
pipes[pipe_cnt].dout.wb_enable = 0;
pipes[pipe_cnt].dout.num_active_wb = 0;
for (j = 0; j < stream->num_wb_info; j++) {
struct dc_writeback_info *wb_info = &stream->writeback_info[j];
if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
(wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
pipes[pipe_cnt].dout.wb_enable = 1;
pipes[pipe_cnt].dout.num_active_wb++;
dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
wb_info->dwb_params.cnv_params.crop_height :
wb_info->dwb_params.cnv_params.src_height;
dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
wb_info->dwb_params.cnv_params.crop_width :
wb_info->dwb_params.cnv_params.src_width;
dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
dout_wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
dout_wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
(double)wb_info->dwb_params.cnv_params.crop_width /
(double)wb_info->dwb_params.dest_width :
(double)wb_info->dwb_params.cnv_params.src_width /
(double)wb_info->dwb_params.dest_width;
dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
(double)wb_info->dwb_params.cnv_params.crop_height /
(double)wb_info->dwb_params.dest_height :
(double)wb_info->dwb_params.cnv_params.src_height /
(double)wb_info->dwb_params.dest_height;
if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
dout_wb.wb_pixel_format = dm_420_8;
else
dout_wb.wb_pixel_format = dm_420_10;
} else
dout_wb.wb_pixel_format = dm_444_32;
/* Workaround for cases where multiple writebacks are connected to the same plane.
* In that case we need to compute the worst case and set the associated writeback
* parameters. This workaround is necessary because the DML computation assumes
* only one set of writeback parameters per pipe.
*/
writeback_dispclk = CalculateWriteBackDISPCLK(
dout_wb.wb_pixel_format,
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
dout_wb.wb_hratio,
dout_wb.wb_vratio,
dout_wb.wb_htaps_luma,
dout_wb.wb_vtaps_luma,
dout_wb.wb_htaps_chroma,
dout_wb.wb_vtaps_chroma,
dout_wb.wb_dst_width,
pipes[pipe_cnt].pipe.dest.htotal,
2);
if (writeback_dispclk > max_calc_writeback_dispclk) {
max_calc_writeback_dispclk = writeback_dispclk;
pipes[pipe_cnt].dout.wb = dout_wb;
}
}
}
pipe_cnt++;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "display_rq_dlg_calc_20.h"
// Function: dml20_rq_dlg_get_rq_params
// Calculate requestor related parameters that are register-definition agnostic
// (i.e. this layer tries to separate real values from the register definition)
// Input:
// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
// Output:
// rq_param - values that can be used to set up the RQ (e.g. swath_height, plane1_addr, etc.)
//
static void dml20_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param);
// Function: dml20_rq_dlg_get_dlg_params
// Calculate deadline related parameters
//
static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp);
#include "../dml_inline_defs.h"
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
if (source_format == dm_444_16) {
if (!is_chroma)
ret_val = 2;
} else if (source_format == dm_444_32) {
if (!is_chroma)
ret_val = 4;
} else if (source_format == dm_444_64) {
if (!is_chroma)
ret_val = 8;
} else if (source_format == dm_420_8) {
if (is_chroma)
ret_val = 2;
else
ret_val = 1;
} else if (source_format == dm_420_10) {
if (is_chroma)
ret_val = 4;
else
ret_val = 2;
} else if (source_format == dm_444_8) {
ret_val = 1;
}
return ret_val;
}
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
ret_val = true;
return ret_val;
}
static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
bool odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double) refclk_freq_in_mhz
* dml_min((double) recout_width, (double) hactive / 2.0)
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
else
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
/ (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
return refcyc_per_delivery;
}
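/*
* Worked example (all numbers hypothetical): for vratio <= 1 without ODM,
* refcyc_per_delivery = refclk * recout_width / pclk / req_per_swath_ub.
* With refclk = 48 MHz, pclk = 594 MHz, recout_width = 3840 and
* req_per_swath_ub = 60:
*
*	48 * 3840 / 594 / 60 ~= 5.17 refclk cycles per request delivery
*
* For vratio > 1 the delivery window is limited by the horizontal scaler
* instead, hence the delivery_width / hscale_pixel_rate form above.
*/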
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
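/*
* Worked example of the log2 encodings above, using the defaults programmed
* by get_surf_rq_param() further down (chunk 8KB, min chunk 1KB, meta chunk
* 2KB, min meta chunk 256B, dpte/mpte group 2KB):
*
*	chunk_size          = log2(8192) - 10     = 3
*	min_chunk_size      = log2(1024) - 8 + 1  = 3
*	meta_chunk_size     = log2(2048) - 10     = 1
*	min_meta_chunk_size = log2(256)  - 6 + 1  = 3
*	dpte_group_size     = log2(2048) - 6      = 5  (the reduced 512B case gives 3)
*	mpte_group_size     = log2(2048) - 6      = 5
*/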
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes
/ (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
256,
0) / 64.0; // 2/3 to chroma
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
}
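/*
* Illustrative example of the plane1 detile buffer split above, assuming a
* hypothetical det_buffer_size_kbytes of 164:
*
*	detile_buf_size_in_bytes = 164 * 1024 = 167936
*	stored luma/chroma swath ratio <= 1.5:
*		plane1_base_address = 167936 / 2 / 64                   = 1312
*	otherwise (2/3 split case):
*		plane1_base_address = round_down_256(2/3 * 167936) / 64 = 1748
*
* The address is in units of 64B, which is why the byte offset is divided
* by 64 before being programmed.
*/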
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
256,
1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
256,
1) + 256;
}
if (rq_param->yuv420) {
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = false;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
req128_l = true;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
// Note: assumption is that the config passed in will fit into
// the detile buffer.
} else {
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
req128_l = false;
else
req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
}
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
__func__,
full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
__func__,
full_swath_bytes_packed_c);
}
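/*
* Illustrative example of the split decision above (byte counts hypothetical):
* with detile_buf_size_in_bytes = 167936, full_swath_bytes_packed_l = 61440
* and full_swath_bytes_packed_c = 30720 for a yuv420 surface:
*
*	total_swath_bytes = 2 * 61440 + 2 * 30720 = 184320 > 167936
*
* so the luma stream falls back to 128B requests (req128_l = true) and only
* half of the packed luma swath is stored, while chroma keeps full 256B
* requests. Had the total fit, both streams would have used 256B requests.
*/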
static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int is_chroma)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element;
unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
false);
unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
true);
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_bytes;
unsigned int meta_blk_height;
unsigned int meta_blk_width;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
const unsigned int pde_proc_buffer_size_64k_reqs =
mode_lib->ip.pde_proc_buffer_size_64k_reqs;
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
unsigned int pde_buf_entries;
bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
Calculate256BBlockSizes((enum source_format_class)(source_format),
(enum dm_swizzle_mode)(tiling),
bytes_per_element_y,
bytes_per_element_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
// "-" in log is divide
// "/2" is like square root
// blk is vertical biased
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
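/*
* Worked example of the block-dimension math above (hypothetical surface):
* 32bpp (log2_bytes_per_element = 2), 256B micro-block of 8x8
* (log2_blk256_height = 3) and a 64KB macro tile (log2_blk_bytes = 16):
*
*	log2_blk_height = 3 + ceil((16 - 8) / 2) = 7  -> 128 rows
*	log2_blk_width  = 16 - 2 - 7             = 7  -> 128 elements
*
* 128 * 128 * 4B = 64KB, so the "log rule" arithmetic reproduces the tile
* size, with the ceil() putting any odd factor into the height (the
* vertical bias noted above).
*/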
if (!surf_vert) {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ blk256_width;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
} else {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ blk256_height;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
* bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
* bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
log2_meta_req_bytes = 6; // a meta request is 64B: an 8x8 block of 1-byte meta elements
// each 64b meta request for dcn is 8x8 meta elements and
// a meta element covers one 256b block of the data surface.
log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
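/*
* Worked example (hypothetical 32bpp surface, blk256 = 8x8, so
* log2_blk256_height = 3 and log2_bytes_per_element = 2):
*
*	log2_meta_req_height = 3 + 3         = 6  -> 64
*	log2_meta_req_width  = 6 + 8 - 2 - 6 = 6  -> 64
*
* One 64B meta request therefore covers a 64x64 element region, i.e.
* 8x8 = 64 of the 8x8 256B data blocks, one meta byte per block.
*/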
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1
<< (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_bytes = 4096;
meta_blk_height = blk256_height * 64;
meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
meta_surface_bytes = meta_pitch
* (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
* bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
8 * vmpg_bytes,
1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
__func__,
meta_pte_req_per_frame_ub);
dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
__func__,
meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear) {
log2_vmpg_height = 0; // one line high
} else {
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
}
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
// The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
// log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
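/*
* Worked example of the dpte request shape (hypothetical 32bpp surface,
* 64KB tile, 4KB VM page, blk256 = 8x8):
*
*	log2_vmpg_height     = (12 - 8) / 2 + 3 = 5  -> one 4KB page is 32x32 elements
*	log2_dpte_req_width  = 3 + 4            = 7  -> 128
*	log2_dpte_req_height = 3 + 4            = 7  -> 128
*
* A 128x128 element request region spans 4x4 = 16 pages, matching the
* "two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB" case above.
*/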
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
if (surf_linear) {
unsigned int dpte_row_height;
log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
/ bytes_per_element,
dpte_buf_in_pte_reqs
* dpte_req_width)
/ data_pitch),
1);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
dpte_req_width,
1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
// the upper bound of the dpte_row_width without dependency on viewport position follows.
// for tiled mode, row height is the same as req height and row store up to vp size upper bound
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
rq_sizing_param->dpte_group_bytes = 2048;
//since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
1);
}
static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
// TODO: check if ppe applies to both luma and chroma in the 422 case
if (is_chroma) {
vp_width = pipe_src_param->viewport_width_c / ppe;
vp_height = pipe_src_param->viewport_height_c;
data_pitch = pipe_src_param->data_pitch_c;
meta_pitch = pipe_src_param->meta_pitch_c;
} else {
vp_width = pipe_src_param->viewport_width / ppe;
vp_height = pipe_src_param->viewport_height;
data_pitch = pipe_src_param->data_pitch;
meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_src_param->source_format,
pipe_src_param->sw_mode,
pipe_src_param->macro_tile_size,
pipe_src_param->source_scan,
is_chroma);
}
static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
|| pipe_src_param->source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
&(rq_param->dlg.rq_l),
&(rq_param->misc.rq_l),
pipe_src_param,
0);
if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
&(rq_param->dlg.rq_c),
&(rq_param->misc.rq_c),
pipe_src_param,
1);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
print__rq_params_st(mode_lib, rq_param);
}
void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as-is.
// It would be nice to decouple the code from the HW register implementation and to extract the code that is repeated for luma and chroma.
static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double dispclk_freq_in_mhz = clks->dispclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_dcfclk_mhz;
double t_calc_us;
double min_ttu_vblank;
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
// Scaling
unsigned int htaps_l;
unsigned int htaps_c;
double hratio_l;
double hratio_c;
double vratio_l;
double vratio_c;
bool scl_enable;
double line_time_in_us;
// double vinit_l;
// double vinit_c;
// double vinit_bot_l;
// double vinit_bot_c;
// unsigned int swath_height_l;
unsigned int swath_width_ub_l;
// unsigned int dpte_bytes_per_row_ub_l;
unsigned int dpte_groups_per_row_ub_l;
// unsigned int meta_pte_bytes_per_frame_ub_l;
// unsigned int meta_bytes_per_row_ub_l;
// unsigned int swath_height_c;
unsigned int swath_width_ub_c;
// unsigned int dpte_bytes_per_row_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int meta_chunks_per_row_ub_c;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int pixel_rate_delay_subtotal;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double line_wait;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double min_dst_y_per_vm_vblank;
double min_dst_y_per_row_vblank;
double lsw;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int meta_row_height_c;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
double refcyc_per_req_delivery_cur1;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq =
(unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
* dml_pow(2, 8));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
__func__,
min_dcfclk_mhz);
dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
__func__,
min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
__func__,
min_dst_y_ttu_vblank);
dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
__func__,
t_calc_us);
dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
__func__,
disp_dlg_regs->min_dst_y_next_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
__func__,
ref_freq_to_pix_freq);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
// vinit_l = scl.vinit;
// vinit_c = scl.vinit_c;
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
// dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
if (scl_enable)
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
else
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
if (dout->dsc_enable) {
double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dispclk_delay_subtotal += dsc_delay;
}
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
// TODO: Where is this coming from?
if (interlaced)
vstartup_start = vstartup_start / 2;
// TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
if (vstartup_start >= min_vblank) {
dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
__func__,
vblank_start,
vblank_end);
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
min_vblank = vstartup_start + 1;
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
}
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
__func__,
pixel_rate_delay_subtotal);
dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
__func__,
dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
__func__,
dst_y_after_scaler);
// Lwait
line_wait = mode_lib->soc.urgent_latency_us;
if (cstate_en)
line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
if (pstate_en)
line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ mode_lib->soc.urgent_latency_us,
line_wait);
line_wait = line_wait / line_time_in_us;
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_per_vm_vblank = 8.0;
min_dst_y_per_row_vblank = 16.0;
// magic!
if (htotal <= 75) {
min_vblank = 300;
min_dst_y_per_vm_vblank = 100.0;
min_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
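// min_hratio_fact_{l,c} below is an effective horizontal throughput factor derived
// from the tap count and horizontal scale ratio (capped at 4); hscale_pixel_rate_{l,c}
// = factor * DPPCLK is then used as the scaler pixel rate in the per-line and
// per-request delivery-time calculations further down.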
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (htaps_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// In ODM
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
__func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n",
__func__,
full_recout_width);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
__func__,
hscale_pixel_rate_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_l);
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_c);
}
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
if (src->num_cursors > 0) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp)(src->cur0_bpp));
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
if (src->num_cursors > 1) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur1,
&refcyc_per_req_delivery_cur1,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur1_src_width,
(enum cursor_bpp)(src->cur1_bpp));
}
// TTU - Misc
// all hard-coded
// Assignment to register structures
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int) dml_pow(2, 13));
}
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
/ (double) vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int) dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c =
(unsigned int) ((double) dpte_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
(unsigned int) ((double) meta_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
(unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
* dml_pow(2, 10));
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
* ref_freq_to_pix_freq);
/*ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));*/
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
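// dml20_rq_dlg_get_dlg_reg gathers the DLG system parameters (watermarks, urgent
// extra latency, deep-sleep DCFCLK, immediate-flip bandwidth/bytes), derives the RQ
// parameters for the selected pipe's source, and then fills the DLG and TTU
// register structures for that pipe via dml20_rq_dlg_get_dlg_params.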
void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
// Get watermarks and Tex (the urgent extra latency).
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
&rq_param.dlg,
&dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
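// calculate_ttu_cursor derives the per-request delivery times for a cursor plane
// (in refclk cycles) from the request size implied by the cursor width/bpp and the
// active/prefetch vertical ratios; the caller packs the results into the TTU
// registers with a 2^10 fixed-point scale.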
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0) {
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
dml_print("DML_DLG: %s: cur_req_width = %d\n",
__func__,
cur_req_width);
dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
__func__,
cur_width_ub);
dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
__func__,
cur_req_per_width);
dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
__func__,
hactive_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_pre_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "display_mode_vba_20.h"
#include "../dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN20_MAX_420_IMAGE_WIDTH 4096
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
double ReturnBW,
bool DCCEnabledAnyPlane,
double ReturnBandwidthToDCN);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat);
static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
// Super monster function with some 45 arguments
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double DPPCLK,
double DISPCLK,
double PixelClock,
double DCFCLKDeepSleep,
unsigned int DSCDelay,
unsigned int DPPPerPlane,
bool ScalerEnabled,
unsigned int NumberOfCursors,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int VBlank,
unsigned int HTotal,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int PageTableLevels,
bool GPUVMEnable,
bool DynamicMetadataEnable,
unsigned int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatencyPixelDataOnly,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBW,
unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
double *Tno_bw,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidthY,
bool GPUVMEnable,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequestsLuma,
unsigned int PDEProcessingBufIn64KBReqs,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_height,
unsigned int *meta_row_height);
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatencyPixelDataOnly,
double SREnterPlusExitTime);
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk);
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw,
double *qual_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double UrgentExtraLatency,
double UrgentLatencyPixelDataOnly,
unsigned int GPUVMMaxPageTableLevels,
bool GPUVMEnable,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
unsigned int ImmediateFlipBytes,
double LineTime,
double VRatio,
double Tno_bw,
double PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
double qual_row_bw,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth);
static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib);
void dml20_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000.0;
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
dml20_DisplayPipeConfiguration(mode_lib);
dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
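// adjust_ReturnBW derates the effective return bandwidth for the DCC-enabled case:
// once when the bandwidth returned to DCN exceeds a quarter of DCFCLK * return bus
// width, and again when the critical compression ratio (a function of ROB size,
// pixel chunk size and urgent latency) falls between 1 and 4.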
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
double ReturnBW,
bool DCCEnabledAnyPlane,
double ReturnBandwidthToDCN)
{
double CriticalCompression;
if (DCCEnabledAnyPlane
&& ReturnBandwidthToDCN
> mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
ReturnBW =
dml_min(
ReturnBW,
ReturnBandwidthToDCN * 4
* (1.0
- mode_lib->vba.UrgentLatencyPixelDataOnly
/ ((mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024
/ ReturnBandwidthToDCN
- mode_lib->vba.DCFCLK
* mode_lib->vba.ReturnBusWidth
/ 4)
+ mode_lib->vba.UrgentLatencyPixelDataOnly));
CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
* mode_lib->vba.UrgentLatencyPixelDataOnly
/ (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly
+ (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024);
if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
ReturnBW =
dml_min(
ReturnBW,
4.0 * ReturnBandwidthToDCN
* (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024
* mode_lib->vba.ReturnBusWidth
* mode_lib->vba.DCFCLK
* mode_lib->vba.UrgentLatencyPixelDataOnly
/ dml_pow(
(ReturnBandwidthToDCN
* mode_lib->vba.UrgentLatencyPixelDataOnly
+ (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024),
2));
return ReturnBW;
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
Delay, pixels;
if (pixelFormat == dm_n422 || pixelFormat == dm_420)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
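// e.g. with bpp = 12 and one pixel per clock this gives
// round(8192 / 2 / 12 / 1) = round(341.33) = 341 clocks of initial transmit delay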
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixels per clock to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_s422)
s = 1;
else
s = 0;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
p = 3 * wx - w;
l0 = ix / w;
a = ix + p * l0;
ax = (a + 2) / 3 + D + 6 + 1;
l = (ax + wx - 1) / wx;
if ((ix % w) == 0 && p != 0)
lstall = 1;
else
lstall = 0;
Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
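// Illustrative walk-through of dscceComputeDelay above (example values only, not
// taken from any particular configuration): bpc = 8, bpp = 12, sliceWidth = 1200,
// numSlices = 4, 4:4:4 output.
// pixelsPerClock = 1, initalXmitDelay = 341, D = 81, w = 1200, s = 0,
// ix = 386, wx = 400, p = 0, l0 = 0, a = 386, ax = 388/3 + 81 + 6 + 1 = 217,
// l = 1, lstall = 0,
// Delay = 1 * 400 * 3 + 217 + 0 + 0 + 22 = 1439, pixels = 1439 * 3 * 1 = 4317.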
static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double DPPCLK,
double DISPCLK,
double PixelClock,
double DCFCLKDeepSleep,
unsigned int DSCDelay,
unsigned int DPPPerPlane,
bool ScalerEnabled,
unsigned int NumberOfCursors,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int VBlank,
unsigned int HTotal,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int PageTableLevels,
bool GPUVMEnable,
bool DynamicMetadataEnable,
unsigned int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatencyPixelDataOnly,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBW,
unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
double *Tno_bw,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
double Tdm, LineTime, Tsetup;
double dst_y_prefetch_equ;
double Tsw_oto;
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
double Tpre_oto;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
if (ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (DPPCLK == 0.0 || DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ DSCDelay;
if (DPPPerPlane > 1)
*DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
*VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
*VUpdateWidthPix = (14.0 / DCFCLKDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
* PixelClock;
*VReadyOffsetPix = dml_max(
150.0 / DPPCLK,
TotalRepeaterDelayTime + 20.0 / DCFCLKDeepSleep + 10.0 / DPPCLK)
* PixelClock;
Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
LineTime = (double) HTotal / PixelClock;
if (DynamicMetadataEnable) {
double Tdmbf, Tdmec, Tdmsks;
Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
Tdmec = LineTime;
if (DynamicMetadataLinesBeforeActiveRequired == 0)
Tdmsks = VBlank * LineTime / 2.0;
else
Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
Tdmsks = Tdmsks / 2;
if (VStartup * LineTime
< Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
MyError = true;
*VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
} else
*VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
} else
Tdm = 0;
if (GPUVMEnable) {
if (PageTableLevels == 4)
*Tno_bw = UrgentExtraLatency + UrgentLatencyPixelDataOnly;
else if (PageTableLevels == 3)
*Tno_bw = UrgentExtraLatency;
else
*Tno_bw = 0;
} else if (DCCEnable)
*Tno_bw = LineTime;
else
*Tno_bw = LineTime / 4;
dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
- (Tsetup + Tdm) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
/ Tsw_oto;
if (GPUVMEnable == true) {
Tvm_oto =
dml_max(
*Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (PageTableLevels
- 1),
LineTime / 4.0));
} else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
Tr0_oto = dml_max(
(MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
dml_max(UrgentLatencyPixelDataOnly, dml_max(LineTime - Tvm_oto, LineTime / 4)));
} else
Tr0_oto = LineTime - Tvm_oto;
Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
dst_y_prefetch_oto = Tpre_oto / LineTime;
if (dst_y_prefetch_oto < dst_y_prefetch_equ)
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
else
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
*DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
/ 4;
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: TCalc: %f\n", TCalc);
dml_print("DML: TWait: %f\n", TWait);
dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: Tsetup: %f\n", Tsetup);
dml_print("DML: Tdm: %f\n", Tdm);
dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
dml_print("DML: HTotal: %d\n", HTotal);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
if (*DestinationLinesForPrefetch > 1) {
*PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow
+ PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ PrefetchSourceLinesC * SwathWidthY / 2
* dml_ceil(BytePerPixelDETC, 2))
/ (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
if (GPUVMEnable) {
TimeForFetchingMetaPTE =
dml_max(
*Tno_bw
+ (double) PDEAndMetaPTEBytesFrame
/ *PrefetchBandwidth,
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (PageTableLevels
- 1),
LineTime / 4));
} else {
if (NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingMetaPTE = LineTime / 4;
else
TimeForFetchingMetaPTE = 0.0;
}
if ((GPUVMEnable == true || DCCEnable == true)) {
TimeForFetchingRowInVBlank =
dml_max(
(MetaRowByte + PixelPTEBytesPerRow)
/ *PrefetchBandwidth,
dml_max(
UrgentLatencyPixelDataOnly,
dml_max(
LineTime
- TimeForFetchingMetaPTE,
LineTime
/ 4.0)));
} else {
if (NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
else
TimeForFetchingRowInVBlank = 0.0;
}
*DestinationLinesToRequestVMInVBlank = dml_floor(
4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
1) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_floor(
4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
1) / 4.0;
LinesToRequestPrefetchPixelData =
*DestinationLinesForPrefetch
- ((NumberOfCursors > 0 || GPUVMEnable
|| DCCEnable) ?
(*DestinationLinesToRequestVMInVBlank
+ *DestinationLinesToRequestRowInVBlank) :
0.0);
if (LinesToRequestPrefetchPixelData > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max(
(double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY
* SwathHeightY
/ (LinesToRequestPrefetchPixelData
- (VInitPreFillY
- 3.0)
/ 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
*VRatioPrefetchY = 0;
}
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
if ((SwathHeightC > 4)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(
*VRatioPrefetchC,
(double) MaxNumSwathC
* SwathHeightC
/ (LinesToRequestPrefetchPixelData
- (VInitPreFillC
- 3.0)
/ 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
*VRatioPrefetchC = 0;
}
}
*RequiredPrefetchPixDataBW =
DPPPerPlane
* ((double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData
* dml_ceil(
BytePerPixelDETY,
1)
+ (double) PrefetchSourceLinesC
/ LinesToRequestPrefetchPixelData
* dml_ceil(
BytePerPixelDETC,
2)
/ 2)
* SwathWidthY / LineTime;
} else {
MyError = true;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
}
} else {
MyError = true;
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
}
return MyError;
}
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
}
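// e.g. with a VCO speed of 3600 MHz the DFS can only produce 14400/N MHz, so a
// 700 MHz request rounds up to 14400/20 = 720 MHz and down to 14400/21 = ~685.7 MHz.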
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
unsigned int MaxPartialSwath;
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!mode_lib->vba.IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
% SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print(
"WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
% SwathHeight;
}
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidth,
bool GPUVMEnable,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequestsLuma,
unsigned int PDEProcessingBufIn64KBReqs,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_height,
unsigned int *meta_row_height)
{
unsigned int MetaRequestHeight;
unsigned int MetaRequestWidth;
unsigned int MetaSurfWidth;
unsigned int MetaSurfHeight;
unsigned int MPDEBytesFrame;
unsigned int MetaPTEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
unsigned int MacroTileHeight;
unsigned int DPDE0BytesFrame;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
if (DCCEnable == true) {
MetaRequestHeight = 8 * BlockHeight256Bytes;
MetaRequestWidth = 8 * BlockWidth256Bytes;
if (ScanDirection == dm_horz) {
*meta_row_height = MetaRequestHeight;
MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ MetaRequestWidth;
*MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = MetaRequestWidth;
MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ MetaRequestHeight;
*MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
}
if (ScanDirection == dm_horz) {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
} else {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(
(double) ViewportHeight - 1,
64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
}
if (GPUVMEnable == true) {
MetaPTEBytesFrame = (dml_ceil(
(double) (DCCMetaSurfaceBytes - VMMPageSize)
/ (8 * VMMPageSize),
1) + 1) * 64;
MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1);
} else {
MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
} else {
MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
|| SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
MacroTileSizeBytes = 4096;
MacroTileHeight = 4 * BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
|| SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
|| SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
|| SurfaceTiling == dm_sw_64kb_r_x) {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 262144;
MacroTileHeight = 32 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) {
if (ScanDirection == dm_horz) {
DPDE0BytesFrame =
64
* (dml_ceil(
((Pitch
* (dml_ceil(
ViewportHeight
- 1,
MacroTileHeight)
+ MacroTileHeight)
* BytePerPixel)
- MacroTileSizeBytes)
/ (8
* 2097152),
1) + 1);
} else {
DPDE0BytesFrame =
64
* (dml_ceil(
((Pitch
* (dml_ceil(
(double) SwathWidth
- 1,
MacroTileHeight)
+ MacroTileHeight)
* BytePerPixel)
- MacroTileSizeBytes)
/ (8
* 2097152),
1) + 1);
}
ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2);
} else {
DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ ExtraDPDEBytesFrame;
if (GPUVMEnable == true) {
unsigned int PTERequestSize;
unsigned int PixelPTEReqHeight;
unsigned int PixelPTEReqWidth;
double FractionOfPTEReturnDrop;
unsigned int EffectivePDEProcessingBufIn64KBReqs;
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeight = 1;
PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeight = MacroTileHeight;
PixelPTEReqWidth = 8 * *MacroTileWidth;
PTERequestSize = 64;
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
FractionOfPTEReturnDrop = 7 / 8;
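// note: 7 / 8 is integer division here, so FractionOfPTEReturnDrop ends up as 0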
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeight = 16 * BlockHeight256Bytes;
PixelPTEReqWidth = 16 * BlockWidth256Bytes;
PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeight = MacroTileHeight;
PixelPTEReqWidth = 8 * *MacroTileWidth;
PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
else
EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height =
dml_min(
128,
1
<< (unsigned int) dml_floor(
dml_log2(
dml_min(
(double) PTEBufferSizeInRequestsLuma
* PixelPTEReqWidth,
EffectivePDEProcessingBufIn64KBReqs
* 65536.0
/ BytePerPixel)
/ Pitch),
1));
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(
(double) (Pitch * *dpte_row_height - 1)
/ PixelPTEReqWidth,
1) + 1);
} else if (ScanDirection == dm_horz) {
*dpte_row_height = PixelPTEReqHeight;
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ 1);
} else {
*dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(
((double) SwathWidth - 1)
/ PixelPTEReqHeight,
1) + 1);
}
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
<= 64 * PTEBufferSizeInRequestsLuma) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
} else {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
return PDEAndMetaPTEBytesFrame;
}
static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib)
{
unsigned int j, k;
mode_lib->vba.WritebackDISPCLK = 0.0;
mode_lib->vba.DISPCLKWithRamping = 0;
mode_lib->vba.DISPCLKWithoutRamping = 0;
mode_lib->vba.GlobalDPPCLK = 0.0;
// dml_ml->vba.DISPCLK and dml_ml->vba.DPPCLK Calculation
//
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k]) {
mode_lib->vba.WritebackDISPCLK =
dml_max(
mode_lib->vba.WritebackDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.HRatio[k] > 1) {
mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1));
} else {
mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPLuma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
1.0));
if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPLuma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
mode_lib->vba.DPPCLKUsingSingleDPP[k] =
mode_lib->vba.DPPCLKUsingSingleDPPLuma;
} else {
if (mode_lib->vba.HRatio[k] > 1) {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPChroma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.VTAPsChroma[k]
/ 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]
/ 2),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ 4
/ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
1.0));
if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPChroma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
* mode_lib->vba.PixelClock[k];
}
mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
mode_lib->vba.DPPCLKUsingSingleDPPLuma,
mode_lib->vba.DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] != k)
continue;
if (mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DISPCLKWithRamping =
dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.PixelClock[k] / 2
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100)
* (1
+ mode_lib->vba.DISPCLKRampingMargin
/ 100));
mode_lib->vba.DISPCLKWithoutRamping =
dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.PixelClock[k] / 2
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100));
} else if (!mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DISPCLKWithRamping =
dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.PixelClock[k]
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100)
* (1
+ mode_lib->vba.DISPCLKRampingMargin
/ 100));
mode_lib->vba.DISPCLKWithoutRamping =
dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.PixelClock[k]
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100));
}
}
mode_lib->vba.DISPCLKWithRamping = dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.WritebackDISPCLK);
mode_lib->vba.DISPCLKWithoutRamping = dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.WritebackDISPCLK);
ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states].dispclk_mhz,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
} else {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
}
DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DPPPerPlane[k] == 0) {
mode_lib->vba.DPPCLK_calculated[k] = 0;
} else {
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
/ mode_lib->vba.DPPPerPlane[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
mode_lib->vba.GlobalDPPCLK = dml_max(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DPPCLK_calculated[k]);
}
mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
* dml_ceil(
mode_lib->vba.DPPCLK_calculated[k] * 255
/ mode_lib->vba.GlobalDPPCLK,
1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
}
// Urgent Watermark
mode_lib->vba.DCCEnabledAnyPlane = false;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.DCCEnabledAnyPlane = true;
mode_lib->vba.ReturnBandwidthToDCN = dml_min(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
mode_lib->vba.FabricAndDRAMBandwidth * 1000)
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
mode_lib->vba.ReturnBW = adjust_ReturnBW(
mode_lib,
mode_lib->vba.ReturnBW,
mode_lib->vba.DCCEnabledAnyPlane,
mode_lib->vba.ReturnBandwidthToDCN);
// Let's do this calculation again??
mode_lib->vba.ReturnBandwidthToDCN = dml_min(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
mode_lib->vba.FabricAndDRAMBandwidth * 1000);
mode_lib->vba.ReturnBW = adjust_ReturnBW(
mode_lib,
mode_lib->vba.ReturnBW,
mode_lib->vba.DCCEnabledAnyPlane,
mode_lib->vba.ReturnBandwidthToDCN);
DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourceScan[k] == dm_horz)
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
mode_lib->vba.SwathWidthY[k] = dml_min(
(double) mode_lib->vba.SwathWidthSingleDPPY[k],
dml_round(
mode_lib->vba.HActive[k] / 2.0
* mode_lib->vba.HRatio[k]));
else {
if (mode_lib->vba.DPPPerPlane[k] == 0) {
mode_lib->vba.SwathWidthY[k] = 0;
} else {
mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
/ mode_lib->vba.DPPPerPlane[k];
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
mode_lib->vba.BytePerPixelDETY[k] = 8;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
mode_lib->vba.BytePerPixelDETY[k] = 4;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
mode_lib->vba.BytePerPixelDETY[k] = 2;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
mode_lib->vba.BytePerPixelDETY[k] = 1;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
mode_lib->vba.BytePerPixelDETY[k] = 1;
mode_lib->vba.BytePerPixelDETC[k] = 2;
} else { // dm_420_10
mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
}
}
mode_lib->vba.TotalDataReadBandwidth = 0.0;
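/*
 * Plane read bandwidth (bytes/s) is swath width times bytes per pixel,
 * divided by the line time and scaled by the vertical ratio; chroma uses half
 * the width and half the rate. Illustrative numbers: a 1920-wide 4 Bpp plane
 * with HTotal = 2200 at 148.5 MHz (line time ~14.8 us) and VRatio = 1 reads
 * roughly 1920 * 4 / 14.8e-6 ~= 518 MB/s.
 */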
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k];
mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
/ 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k] / 2;
DTRACE(
" read_bw[%i] = %fBps",
k,
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]);
mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k];
}
mode_lib->vba.TotalDCCActiveDPP = 0;
mode_lib->vba.TotalActiveDPP = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
}
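/*
 * Urgent round-trip and out-of-order latency: the round-trip ping plus 32
 * cycles expressed in DCFCLK time, plus the per-channel out-of-order return
 * bytes across all channels drained at the adjusted return bandwidth.
 */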
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
(mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly
* mode_lib->vba.NumberOfChannels
/ mode_lib->vba.ReturnBW;
mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
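/*
 * Extra watermark for the last pixel of a line: for each plane, compare how
 * long the fabric needs to deliver one full swath (at this plane's share of
 * the total read bandwidth) against the time the display pipe takes to
 * deliver a line, and keep the worst-case shortfall across planes.
 */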
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
if (mode_lib->vba.VRatio[k] <= 1.0)
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
(double) mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
else
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
(double) mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
* mode_lib->vba.SwathHeightY[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
/ (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ mode_lib->vba.TotalDataReadBandwidth);
mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
mode_lib->vba.LastPixelOfLineExtraWatermark,
DataFabricLineDeliveryTimeLuma
- mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
if (mode_lib->vba.BytePerPixelDETC[k] == 0)
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
mode_lib->vba.SwathWidthY[k] / 2.0
* mode_lib->vba.DPPPerPlane[k]
/ (mode_lib->vba.HRatio[k] / 2.0)
/ mode_lib->vba.PixelClock[k];
else
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
mode_lib->vba.SwathWidthY[k] / 2.0
/ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
/ mode_lib->vba.DPPCLK[k];
DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
* mode_lib->vba.SwathHeightC[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
/ (mode_lib->vba.ReturnBW
* mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ mode_lib->vba.TotalDataReadBandwidth);
mode_lib->vba.LastPixelOfLineExtraWatermark =
dml_max(
mode_lib->vba.LastPixelOfLineExtraWatermark,
DataFabricLineDeliveryTimeChroma
- mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
}
mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ mode_lib->vba.TotalDCCActiveDPP
* mode_lib->vba.MetaChunkSize) * 1024.0
/ mode_lib->vba.ReturnBW;
if (mode_lib->vba.GPUVMEnable)
mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
* mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBW;
mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatencyPixelDataOnly
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency;
DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k])
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k];
}
if (mode_lib->vba.TotalActiveWriteback <= 1)
mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
else
mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ mode_lib->vba.WritebackChunkSize * 1024.0 / 32
/ mode_lib->vba.SOCCLK;
DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
// NB P-State/DRAM Clock Change Watermark
mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.UrgentWatermark;
DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
DTRACE(" calculating wb pstate watermark");
DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
if (mode_lib->vba.TotalActiveWriteback <= 1)
mode_lib->vba.WritebackDRAMClockChangeWatermark =
mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.WritebackLatency;
else
mode_lib->vba.WritebackDRAMClockChangeWatermark =
mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.WritebackLatency
+ mode_lib->vba.WritebackChunkSize * 1024.0 / 32
/ mode_lib->vba.SOCCLK;
DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
// Stutter Efficiency
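/*
 * Stutter model: the DET lines rounded down to whole swaths, times the line
 * time over the vertical ratio, is how long each plane can be fed from the
 * DET alone; the minimum across planes bounds the stutter period.
 */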
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
/ mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
mode_lib->vba.LinesInDETY[k],
mode_lib->vba.SwathHeightY[k]);
mode_lib->vba.FullDETBufferingTimeY[k] =
mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k];
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
/ mode_lib->vba.BytePerPixelDETC[k]
/ (mode_lib->vba.SwathWidthY[k] / 2);
mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
mode_lib->vba.LinesInDETC[k],
mode_lib->vba.SwathHeightC[k]);
mode_lib->vba.FullDETBufferingTimeC[k] =
mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ (mode_lib->vba.VRatio[k] / 2);
} else {
mode_lib->vba.LinesInDETC[k] = 0;
mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
}
}
mode_lib->vba.MinFullDETBufferingTime = 999999.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.FullDETBufferingTimeY[k]
< mode_lib->vba.MinFullDETBufferingTime) {
mode_lib->vba.MinFullDETBufferingTime =
mode_lib->vba.FullDETBufferingTimeY[k];
mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
(double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
}
if (mode_lib->vba.FullDETBufferingTimeC[k]
< mode_lib->vba.MinFullDETBufferingTime) {
mode_lib->vba.MinFullDETBufferingTime =
mode_lib->vba.FullDETBufferingTimeC[k];
mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
(double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
}
}
mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DCCEnable[k]) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ mode_lib->vba.DCCRate[k]
/ 1000
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ mode_lib->vba.DCCRate[k]
/ 1000;
} else {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000;
}
if (mode_lib->vba.DCCEnable[k]) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000 / 256
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000 / 256;
}
if (mode_lib->vba.GPUVMEnable) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000 / 512
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000 / 512;
}
}
mode_lib->vba.PartOfBurstThatFitsInROB =
dml_min(
mode_lib->vba.MinFullDETBufferingTime
* mode_lib->vba.TotalDataReadBandwidth,
mode_lib->vba.ROBBufferSizeInKByte * 1024
* mode_lib->vba.TotalDataReadBandwidth
/ (mode_lib->vba.AverageReadBandwidthGBytePerSecond
* 1000));
mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
* (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
/ mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ (mode_lib->vba.MinFullDETBufferingTime
* mode_lib->vba.TotalDataReadBandwidth
- mode_lib->vba.PartOfBurstThatFitsInROB)
/ (mode_lib->vba.DCFCLK * 64);
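/*
 * Efficiency excluding vblank is the fraction of the minimum DET buffering
 * time not spent exiting self-refresh and refilling the DET (the ROB-resident
 * part at the compression-adjusted return rate, the remainder at 64 bytes per
 * DCFCLK cycle). Illustrative numbers: SRExitTime + StutterBurstTime = 20 us
 * against a 100 us buffering time gives (1 - 20 / 100) * 100 = 80%.
 */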
if (mode_lib->vba.TotalActiveWriteback == 0) {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
- (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
/ mode_lib->vba.MinFullDETBufferingTime) * 100;
} else {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
}
mode_lib->vba.SmallestVBlank = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.VBlankTime = 0;
}
mode_lib->vba.SmallestVBlank = dml_min(
mode_lib->vba.SmallestVBlank,
mode_lib->vba.VBlankTime);
}
mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
* (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
- mode_lib->vba.SmallestVBlank)
+ mode_lib->vba.SmallestVBlank)
/ mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
// DCFCLK Deep Sleep
mode_lib->vba.DCFCLKDeepSleep = 8.0;
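/*
 * DCFCLK deep sleep floor: start from 8 MHz, require roughly 1.1x the byte
 * rate needed to keep each pipe's line delivery going, divided by 32 (planes
 * with chroma) or 64 (luma-only) bytes per clock, and never drop below
 * pixel clock / 16 for any plane.
 */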
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] =
dml_max(
1.1 * mode_lib->vba.SwathWidthY[k]
* dml_ceil(
mode_lib->vba.BytePerPixelDETY[k],
1) / 32
/ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
* dml_ceil(
mode_lib->vba.BytePerPixelDETC[k],
2) / 32
/ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
} else
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * mode_lib->vba.SwathWidthY[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
/ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
mode_lib->vba.DCFCLKDeepSleepPerPlane[k],
mode_lib->vba.PixelClock[k] / 16.0);
mode_lib->vba.DCFCLKDeepSleep = dml_max(
mode_lib->vba.DCFCLKDeepSleep,
mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
DTRACE(
" dcfclk_deepsleep_per_plane[%i] = %fMHz",
k,
mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
}
DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFCLKDeepSleep);
// Stutter Watermark
mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFCLKDeepSleep;
mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency;
DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
// Urgent Latency Supported
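/*
 * Urgent latency support per plane: the effective DET plus line-buffer depth
 * (floored to whole swaths) hides data for that many lines at the display
 * consumption rate; subtracting the time to refill those lines at this pipe's
 * share of the return bandwidth gives the latency that can be tolerated.
 */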
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.EffectiveDETPlusLBLinesLuma =
dml_floor(
mode_lib->vba.LinesInDETY[k]
+ dml_min(
mode_lib->vba.LinesInDETY[k]
* mode_lib->vba.DPPCLK[k]
* mode_lib->vba.BytePerPixelDETY[k]
* mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]),
(double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
mode_lib->vba.SwathHeightY[k]);
mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k]
- mode_lib->vba.EffectiveDETPlusLBLinesLuma
* mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.BytePerPixelDETY[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]);
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.EffectiveDETPlusLBLinesChroma =
dml_floor(
mode_lib->vba.LinesInDETC[k]
+ dml_min(
mode_lib->vba.LinesInDETC[k]
* mode_lib->vba.DPPCLK[k]
* mode_lib->vba.BytePerPixelDETC[k]
* mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]),
(double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
mode_lib->vba.SwathHeightC[k]);
mode_lib->vba.UrgentLatencySupportUsChroma =
mode_lib->vba.EffectiveDETPlusLBLinesChroma
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ (mode_lib->vba.VRatio[k] / 2)
- mode_lib->vba.EffectiveDETPlusLBLinesChroma
* (mode_lib->vba.SwathWidthY[k]
/ 2)
* mode_lib->vba.BytePerPixelDETC[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]);
mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
mode_lib->vba.UrgentLatencySupportUsLuma,
mode_lib->vba.UrgentLatencySupportUsChroma);
} else {
mode_lib->vba.UrgentLatencySupportUs[k] =
mode_lib->vba.UrgentLatencySupportUsLuma;
}
}
mode_lib->vba.MinUrgentLatencySupportUs = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
mode_lib->vba.MinUrgentLatencySupportUs,
mode_lib->vba.UrgentLatencySupportUs[k]);
}
// Non-Urgent Latency Tolerance
mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
- mode_lib->vba.UrgentWatermark;
// DSCCLK
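/*
 * DSCCLK: slaved or DSC-disabled planes get 0. Otherwise the engine runs at
 * one third of the back-end pixel clock, halved again for native 4:2:0/n422
 * (format factor 2) and per engine under ODM combine, with margin for clock
 * down-spreading. Illustrative numbers (0.5% down-spread assumed): a 594 MHz
 * back-end, 4:4:4, no ODM gives 594 / 3 / (1 - 0.005) ~= 199 MHz.
 */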
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
mode_lib->vba.DSCCLK_calculated[k] = 0.0;
} else {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k] == dm_n422)
mode_lib->vba.DSCFormatFactor = 2;
else
mode_lib->vba.DSCFormatFactor = 1;
if (mode_lib->vba.ODMCombineEnabled[k])
mode_lib->vba.DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 6
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
else
mode_lib->vba.DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 3
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
}
}
// DSC Delay
// TODO
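/*
 * DSC delay is the rate-buffer (dscce) delay for one slice of width
 * ceil(HActive / slices) plus the fixed interface delay; with ODM combine
 * each half drives half the slices and the sum is doubled. The result is
 * rescaled from the back-end to the front-end pixel clock.
 */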
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double bpp = mode_lib->vba.OutputBpp[k];
unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
if (!mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DSCDelay[k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
mode_lib->vba.DSCDelay[k] =
2
* (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices / 2.0,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]));
}
mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.PixelClockBackEnd[k];
} else {
mode_lib->vba.DSCDelay[k] = 0;
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.DSCEnabled[j])
mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
// Prefetch
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PixelPTEBytesPerRowY;
unsigned int MetaRowByteY;
unsigned int MetaRowByteC;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int PixelPTEBytesPerRowC;
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
&mode_lib->vba.BlockHeight256BytesY[k],
&mode_lib->vba.BlockHeight256BytesC[k],
&mode_lib->vba.BlockWidth256BytesY[k],
&mode_lib->vba.BlockWidth256BytesC[k]);
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.BlockHeight256BytesY[k],
mode_lib->vba.BlockWidth256BytesY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
mode_lib->vba.SwathWidthY[k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&mode_lib->vba.MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.ViewportYStartY[k],
&mode_lib->vba.VInitPreFillY[k],
&mode_lib->vba.MaxNumSwathY[k]);
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
PDEAndMetaPTEBytesFrameC =
CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.BlockHeight256BytesC[k],
mode_lib->vba.BlockWidth256BytesC[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(
mode_lib->vba.BytePerPixelDETC[k],
2),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2,
mode_lib->vba.ViewportHeight[k] / 2,
mode_lib->vba.SwathWidthY[k] / 2,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchC[k],
0,
&mode_lib->vba.MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.ViewportYStartC[k],
&mode_lib->vba.VInitPreFillC[k],
&mode_lib->vba.MaxNumSwathC[k]);
} else {
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
mode_lib->vba.MaxNumSwathC[k] = 0;
mode_lib->vba.PrefetchSourceLinesC[k] = 0;
}
mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ PDEAndMetaPTEBytesFrameC;
mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
MetaRowByteY,
MetaRowByteC,
mode_lib->vba.meta_row_height[k],
mode_lib->vba.meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_bw[k],
&mode_lib->vba.dpte_row_bw[k],
&mode_lib->vba.qual_row_bw[k]);
}
mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k])
/ mode_lib->vba.DISPCLK;
} else
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[j] == k
&& mode_lib->vba.WritebackEnable[j] == true) {
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
dml_max(
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[j],
mode_lib->vba.WritebackHRatio[j],
mode_lib->vba.WritebackVRatio[j],
mode_lib->vba.WritebackLumaHTaps[j],
mode_lib->vba.WritebackLumaVTaps[j],
mode_lib->vba.WritebackChromaHTaps[j],
mode_lib->vba.WritebackChromaVTaps[j],
mode_lib->vba.WritebackDestinationWidth[j])
/ mode_lib->vba.DISPCLK);
}
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j)
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
mode_lib->vba.VStartupLines = 13;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.MaxVStartupLines[k] =
mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(
1.0,
dml_ceil(
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1));
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
mode_lib->vba.MaximumMaxVStartupLines = dml_max(
mode_lib->vba.MaximumMaxVStartupLines,
mode_lib->vba.MaxVStartupLines[k]);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.cursor_bw[k] = 0.0;
for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
* mode_lib->vba.CursorBPP[k][j] / 8.0
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k];
}
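/*
 * VStartup search: starting from 13 lines, build a prefetch schedule for
 * every plane, accumulate the worst-case read bandwidth (cursor, VM, row and
 * prefetch demand) and accept it only if it fits within ReturnBW with
 * VRatioPrefetch <= 4 and at least two destination line times per plane
 * (and, when requested, immediate flip still supported). Otherwise bump
 * VStartupLines and retry until the per-plane maximum is exceeded.
 */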
do {
double MaxTotalRDBandwidth = 0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
bool prefetch_vm_bw_valid = true;
bool prefetch_row_bw_valid = true;
double TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.SwathWidthY[k],
dml_ceil(
mode_lib->vba.BytePerPixelDETY[k],
1),
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
}
mode_lib->vba.ErrorResult[k] =
CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.DPPCLK[k],
mode_lib->vba.DISPCLK,
mode_lib->vba.PixelClock[k],
mode_lib->vba.DCFCLKDeepSleep,
mode_lib->vba.DSCDelay[k],
mode_lib->vba.DPPPerPlane[k],
mode_lib->vba.ScalerEnabled[k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal,
mode_lib->vba.DPPCLKDelaySCL,
mode_lib->vba.DPPCLKDelaySCLLBOnly,
mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelayCNVCCursor,
mode_lib->vba.DISPCLKDelaySubtotal,
(unsigned int) (mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.HRatio[k]),
mode_lib->vba.OutputFormat[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[k]),
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.TCalc,
mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
mode_lib->vba.MetaRowByte[k],
mode_lib->vba.PixelPTEBytesPerRow[k],
mode_lib->vba.PrefetchSourceLinesY[k],
mode_lib->vba.SwathWidthY[k],
mode_lib->vba.BytePerPixelDETY[k],
mode_lib->vba.VInitPreFillY[k],
mode_lib->vba.MaxNumSwathY[k],
mode_lib->vba.PrefetchSourceLinesC[k],
mode_lib->vba.BytePerPixelDETC[k],
mode_lib->vba.VInitPreFillC[k],
mode_lib->vba.MaxNumSwathC[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
&mode_lib->vba.DSTXAfterScaler[k],
&mode_lib->vba.DSTYAfterScaler[k],
&mode_lib->vba.DestinationLinesForPrefetch[k],
&mode_lib->vba.PrefetchBandwidth[k],
&mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
&mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
&mode_lib->vba.VRatioPrefetchY[k],
&mode_lib->vba.VRatioPrefetchC[k],
&mode_lib->vba.RequiredPrefetchPixDataBWLuma[k],
&mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
&mode_lib->vba.Tno_bw[k],
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.VStartup[k] = dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[k]);
if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
!= 0) {
mode_lib->vba.VStartup[k] =
mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
}
} else {
mode_lib->vba.VStartup[k] =
dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
mode_lib->vba.prefetch_vm_bw[k] = 0;
else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
mode_lib->vba.prefetch_vm_bw[k] =
(double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
/ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
} else {
mode_lib->vba.prefetch_vm_bw[k] = 0;
prefetch_vm_bw_valid = false;
}
if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
== 0)
mode_lib->vba.prefetch_row_bw[k] = 0;
else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
mode_lib->vba.prefetch_row_bw[k] =
(double) (mode_lib->vba.MetaRowByte[k]
+ mode_lib->vba.PixelPTEBytesPerRow[k])
/ (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
} else {
mode_lib->vba.prefetch_row_bw[k] = 0;
prefetch_row_bw_valid = false;
}
MaxTotalRDBandwidth =
MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ dml_max(
mode_lib->vba.prefetch_vm_bw[k],
dml_max(
mode_lib->vba.prefetch_row_bw[k],
dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k],
mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])
+ mode_lib->vba.meta_row_bw[k]
+ mode_lib->vba.dpte_row_bw[k]));
if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (mode_lib->vba.VRatioPrefetchY[k] > 4
|| mode_lib->vba.VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
}
if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
&& prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
&& !DestinationLineTimesForPrefetchLessThan2)
mode_lib->vba.PrefetchModeSupported = true;
else {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
}
if (mode_lib->vba.PrefetchModeSupported == true) {
double final_flip_bw[DC__NUM_DPP__MAX];
unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
double total_dcn_read_bw_with_flip = 0;
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
- mode_lib->vba.cursor_bw[k]
- dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ mode_lib->vba.qual_row_bw[k],
mode_lib->vba.PrefetchBandwidth[k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
ImmediateFlipBytes[k] = 0;
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
ImmediateFlipBytes[k] =
mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ mode_lib->vba.MetaRowByte[k]
+ mode_lib->vba.PixelPTEBytesPerRow[k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ ImmediateFlipBytes[k];
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
ImmediateFlipBytes[k],
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
mode_lib->vba.MetaRowByte[k],
mode_lib->vba.PixelPTEBytesPerRow[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
mode_lib->vba.qual_row_bw[k],
&mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
&mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
&final_flip_bw[k],
&mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
total_dcn_read_bw_with_flip =
total_dcn_read_bw_with_flip
+ mode_lib->vba.cursor_bw[k]
+ dml_max(
mode_lib->vba.prefetch_vm_bw[k],
dml_max(
mode_lib->vba.prefetch_row_bw[k],
final_flip_bw[k]
+ dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k],
mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])));
}
mode_lib->vba.ImmediateFlipSupported = true;
if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->vba.ImmediateFlipSupported = false;
}
}
} else {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ErrorResult[k]) {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
}
}
mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
} while (!((mode_lib->vba.PrefetchModeSupported
&& (!mode_lib->vba.ImmediateFlipSupport
|| mode_lib->vba.ImmediateFlipSupported))
|| mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
// Display Pipeline Delivery Time in Prefetch
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
}
if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
}
}
}
// Min TTUVBlank
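/*
 * Prefetch mode 0 allows both DRAM clock change and self-refresh during
 * vblank, so MinTTUVBlank must cover the largest of those watermarks;
 * mode 1 allows only self-refresh; mode 2 allows neither, leaving just the
 * urgent watermark. TCalc is added when dynamic metadata is disabled.
 */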
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
mode_lib->vba.MinTTUVBlank[k] = dml_max(
mode_lib->vba.DRAMClockChangeWatermark,
dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark));
} else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
mode_lib->vba.MinTTUVBlank[k] = dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark);
} else {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
}
if (!mode_lib->vba.DynamicMetadataEnable[k])
mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ mode_lib->vba.MinTTUVBlank[k];
}
// DCC Configuration
mode_lib->vba.ActiveDPPs = 0;
// NB P-State/DRAM Clock Change Support
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
}
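/*
 * Active DRAM clock change margin per plane: line-buffer latency hiding plus
 * DPP/OPP output buffering plus the DET time not yet consumed, minus the
 * DRAM clock change watermark (with a penalty when several DPPs share the
 * return path). The minimum across planes, plus the clock change latency,
 * is the latency that can be supported while active.
 */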
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double DPPOutputBufferLinesY;
double DPPOutputBufferLinesC;
double DPPOPPBufferingY;
double MaxDETBufferingTimeY;
double ActiveDRAMClockChangeLatencyMarginY;
mode_lib->vba.LBLatencyHidingSourceLinesY =
dml_min(
mode_lib->vba.MaxLineBufferLines,
(unsigned int) dml_floor(
(double) mode_lib->vba.LineBufferSize
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.SwathWidthY[k]
/ dml_max(
mode_lib->vba.HRatio[k],
1.0)),
1)) - (mode_lib->vba.vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC =
dml_min(
mode_lib->vba.MaxLineBufferLines,
(unsigned int) dml_floor(
(double) mode_lib->vba.LineBufferSize
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.SwathWidthY[k]
/ 2.0
/ dml_max(
mode_lib->vba.HRatio[k]
/ 2,
1.0)),
1))
- (mode_lib->vba.VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
/ mode_lib->vba.VRatio[k]
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
/ (mode_lib->vba.VRatio[k] / 2)
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
/ mode_lib->vba.SwathWidthY[k];
} else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesY = 0.5;
} else {
DPPOutputBufferLinesY = 1;
}
if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
/ (mode_lib->vba.SwathWidthY[k] / 2);
} else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesC = 0.5;
} else {
DPPOutputBufferLinesC = 1;
}
DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ (mode_lib->vba.LinesInDETY[k]
- mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
/ mode_lib->vba.SwathHeightY[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
if (mode_lib->vba.ActiveDPPs > 1) {
ActiveDRAMClockChangeLatencyMarginY =
ActiveDRAMClockChangeLatencyMarginY
- (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
* mode_lib->vba.SwathHeightY[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
}
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
* (DPPOutputBufferLinesC
+ mode_lib->vba.OPPOutputBufferLines);
double MaxDETBufferingTimeC =
mode_lib->vba.FullDETBufferingTimeC[k]
+ (mode_lib->vba.LinesInDETC[k]
- mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
/ mode_lib->vba.SwathHeightC[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
- mode_lib->vba.DRAMClockChangeWatermark;
if (mode_lib->vba.ActiveDPPs > 1) {
ActiveDRAMClockChangeLatencyMarginC =
ActiveDRAMClockChangeLatencyMarginC
- (1
- 1
/ (mode_lib->vba.ActiveDPPs
- 1))
* mode_lib->vba.SwathHeightC[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
ActiveDRAMClockChangeLatencyMarginY,
ActiveDRAMClockChangeLatencyMarginC);
} else {
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
ActiveDRAMClockChangeLatencyMarginY;
}
if (mode_lib->vba.WritebackEnable[k]) {
double WritebackDRAMClockChangeLatencyMargin;
if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
WritebackDRAMClockChangeLatencyMargin =
(double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
* 4)
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
} else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
WritebackDRAMClockChangeLatencyMargin =
dml_min(
(double) mode_lib->vba.WritebackInterfaceLumaBufferSize
* 8.0 / 10,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize
* 8 / 10)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]))
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
} else {
WritebackDRAMClockChangeLatencyMargin =
dml_min(
(double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]))
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
WritebackDRAMClockChangeLatencyMargin);
}
}
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
mode_lib->vba.DRAMClockChangeSupport[0][0] =
dm_dram_clock_change_unsupported;
}
}
} else {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
// XFC Parameters:
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.XFCEnabled[k] == true) {
double TWait;
mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.SREnterPlusExitTime);
mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.SwathWidthY[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.XFCTransferDelay[k] =
dml_ceil(
mode_lib->vba.XFCBusTransportTime
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.XFCPrechargeDelay[k] =
dml_ceil(
(mode_lib->vba.XFCBusTransportTime
+ mode_lib->vba.TInitXFill
+ mode_lib->vba.TslvChk)
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
* mode_lib->vba.SrcActiveDrainRate;
mode_lib->vba.FinalFillMargin =
(mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.XFCFillConstant;
mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.FinalFillMargin;
mode_lib->vba.RemainingFillLevel = dml_max(
0.0,
mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
/ (mode_lib->vba.SrcActiveDrainRate
* mode_lib->vba.XFCFillBWOverhead / 100);
mode_lib->vba.XFCPrefetchMargin[k] =
mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ mode_lib->vba.TFinalxFill
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
mode_lib->vba.XFCPrechargeDelay[k] = 0;
mode_lib->vba.XFCTransferDelay[k] = 0;
mode_lib->vba.XFCPrefetchMargin[k] = 0;
}
}
{
unsigned int VStartupMargin = 0;
bool FirstMainPlane = true;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
unsigned int Margin = (mode_lib->vba.MaxVStartupLines[k] - mode_lib->vba.VStartup[k])
* mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k];
if (FirstMainPlane) {
VStartupMargin = Margin;
FirstMainPlane = false;
} else
VStartupMargin = dml_min(VStartupMargin, Margin);
}
if (mode_lib->vba.UseMaximumVStartup) {
if (mode_lib->vba.VTotal_Max[k] == mode_lib->vba.VTotal[k]) {
// Only use max VStartup if this timing is not DRR or late flip.
mode_lib->vba.VStartup[k] = mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]];
}
}
}
}
}
static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
double BytePerPixDETY;
double BytePerPixDETC;
double Read256BytesBlockHeightY;
double Read256BytesBlockHeightC;
double Read256BytesBlockWidthY;
double Read256BytesBlockWidthC;
double MaximumSwathHeightY;
double MaximumSwathHeightC;
double MinimumSwathHeightY;
double MinimumSwathHeightC;
double SwathWidth;
double SwathWidthGranularityY;
double SwathWidthGranularityC;
double RoundedUpMaxSwathSizeBytesY;
double RoundedUpMaxSwathSizeBytesC;
unsigned int j, k;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
BytePerPixDETY = 8;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
BytePerPixDETY = 4;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
BytePerPixDETY = 2;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 2;
} else {
BytePerPixDETY = 4.0 / 3.0;
BytePerPixDETC = 8.0 / 3.0;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
Read256BytesBlockHeightY = 4;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
Read256BytesBlockHeightY = 8;
} else {
Read256BytesBlockHeightY = 16;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockHeightC = 0;
Read256BytesBlockWidthC = 0;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
Read256BytesBlockHeightC = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
Read256BytesBlockHeightY = 16;
Read256BytesBlockHeightC = 8;
} else {
Read256BytesBlockHeightY = 8;
Read256BytesBlockHeightC = 8;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
/ Read256BytesBlockHeightC;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
MaximumSwathHeightY = Read256BytesBlockHeightY;
MaximumSwathHeightC = Read256BytesBlockHeightC;
} else {
MaximumSwathHeightY = Read256BytesBlockWidthY;
MaximumSwathHeightC = Read256BytesBlockWidthC;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
&& mode_lib->vba.SourceScan[k] != dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
}
MinimumSwathHeightC = MaximumSwathHeightC;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
}
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
SwathWidth = mode_lib->vba.ViewportWidth[k];
} else {
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
if (MainPlaneDoesODMCombine == true) {
SwathWidth = dml_min(
SwathWidth,
mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
} else {
if (mode_lib->vba.DPPPerPlane[k] == 0)
SwathWidth = 0;
else
SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
}
SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
RoundedUpMaxSwathSizeBytesY = (dml_ceil(
(double) (SwathWidth - 1),
SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
* MaximumSwathHeightY;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ 256;
}
if (MaximumSwathHeightC > 0) {
SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
/ MaximumSwathHeightC;
RoundedUpMaxSwathSizeBytesC = (dml_ceil(
(double) (SwathWidth / 2.0 - 1),
SwathWidthGranularityC) + SwathWidthGranularityC)
* BytePerPixDETC * MaximumSwathHeightC;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(
RoundedUpMaxSwathSizeBytesC,
256) + 256;
}
} else
RoundedUpMaxSwathSizeBytesC = 0.0;
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
<= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) {
mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
} else {
mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
}
if (mode_lib->vba.SwathHeightC[k] == 0) {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024;
mode_lib->vba.DETBufferSizeC[k] = 0;
} else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 2;
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 2;
} else {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 * 2 / 3;
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 3;
}
}
}
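/*
 * TWait is the stall budgeted before prefetch can start: in prefetch mode 0
 * it covers the worst of DRAM clock change plus urgent latency, the
 * self-refresh enter/exit time, and the urgent latency alone; mode 1 drops
 * the DRAM clock change term; mode 2 waits only for the urgent latency.
 */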
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatencyPixelDataOnly,
double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(
DRAMClockChangeLatency + UrgentLatencyPixelDataOnly,
dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly);
} else {
return UrgentLatencyPixelDataOnly;
}
}
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk)
{
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
*TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
*TslvChk = XFCSlvChunkSize / AvgfillRate;
dml_print(
"DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
*SrcActiveDrainRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
return result;
}
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth)
{
double CalculateWriteBackDelay =
dml_max(
dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
* dml_ceil(
WritebackDestinationWidth
/ 4.0,
1)
+ dml_ceil(1.0 / WritebackVRatio, 1)
* (dml_ceil(
WritebackLumaVTaps
/ 4.0,
1) + 4));
if (WritebackPixelFormat != dm_444_32) {
CalculateWriteBackDelay =
dml_max(
CalculateWriteBackDelay,
dml_max(
dml_ceil(
WritebackChromaHTaps
/ 2.0,
1)
/ (2
* WritebackHRatio),
WritebackChromaVTaps
* dml_ceil(
1
/ (2
* WritebackVRatio),
1)
* dml_ceil(
WritebackDestinationWidth
/ 2.0
/ 2.0,
1)
+ dml_ceil(
1
/ (2
* WritebackVRatio),
1)
* (dml_ceil(
WritebackChromaVTaps
/ 4.0,
1)
+ 4)));
}
return CalculateWriteBackDelay;
}
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw,
double *qual_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ VRatio / 2 * MetaRowByteChroma
/ (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatio / 2 * PixelPTEBytesPerRowChroma
/ (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
*qual_row_bw = *meta_row_bw + *dpte_row_bw;
} else {
*qual_row_bw = 0;
}
}
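/*
 * Immediate flip schedule: 4:2:0 surfaces simply reuse qual_row_bw. For other
 * formats the available flip bandwidth is split by byte share, the VM and row
 * fetch times are computed (never less than a quarter line), and the flip is
 * rejected if it needs 8 or more lines for the VM fetch, 16 or more for the
 * row fetch, or the combined fetch time exceeds the minimum row time.
 */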
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double UrgentExtraLatency,
double UrgentLatencyPixelDataOnly,
unsigned int GPUVMMaxPageTableLevels,
bool GPUVMEnable,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
unsigned int ImmediateFlipBytes,
double LineTime,
double VRatio,
double Tno_bw,
double PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
double qual_row_bw,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
double min_row_time = 0.0;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*DestinationLinesToRequestVMInImmediateFlip = 0.0;
*DestinationLinesToRequestRowInImmediateFlip = 0.0;
*final_flip_bw = qual_row_bw;
*ImmediateFlipSupportedForPipe = true;
} else {
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
if (GPUVMEnable == true) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingMetaPTEImmediateFlip =
dml_max(
Tno_bw
+ PDEAndMetaPTEBytesFrame
/ mode_lib->vba.ImmediateFlipBW[0],
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (GPUVMMaxPageTableLevels
- 1),
LineTime / 4.0));
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip = dml_floor(
4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
1) / 4.0;
if ((GPUVMEnable || DCCEnable)) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingRowInVBlankImmediateFlip = dml_max(
(MetaRowByte + PixelPTEBytesPerRow)
/ mode_lib->vba.ImmediateFlipBW[0],
dml_max(UrgentLatencyPixelDataOnly, LineTime / 4.0));
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestRowInImmediateFlip = dml_floor(
4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
1) / 4.0;
if (GPUVMEnable == true) {
*final_flip_bw =
dml_max(
PDEAndMetaPTEBytesFrame
/ (*DestinationLinesToRequestVMInImmediateFlip
* LineTime),
(MetaRowByte + PixelPTEBytesPerRow)
/ (TimeForFetchingRowInVBlankImmediateFlip
* LineTime));
} else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
*final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
/ (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
} else {
*final_flip_bw = 0;
}
if (GPUVMEnable && !DCCEnable)
min_row_time = dpte_row_height * LineTime / VRatio;
else if (!GPUVMEnable && DCCEnable)
min_row_time = meta_row_height * LineTime / VRatio;
else
min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
/ VRatio;
if (*DestinationLinesToRequestVMInImmediateFlip >= 8
|| *DestinationLinesToRequestRowInImmediateFlip >= 16
|| TimeForFetchingMetaPTEImmediateFlip
+ 2 * TimeForFetchingRowInVBlankImmediateFlip
> min_row_time)
*ImmediateFlipSupportedForPipe = false;
else
*ImmediateFlipSupportedForPipe = true;
}
}
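/*
 * Descriptive note (added): truncate a theoretical output bpp to a value the
 * link can actually signal: the fixed HDMI rates per output format, the
 * discrete non-DSC DP/eDP rates, or, with DSC enabled, a bpp in 1/16
 * increments capped near 1.5x/2x/3x the DSC input bits per component for
 * 4:2:0, native 4:2:2 and 4:4:4 respectively. Returns BPP_INVALID when even
 * the minimum rate cannot be met.
 */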
static unsigned int TruncToValidBPP(
double DecimalBPP,
bool DSCEnabled,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent)
{
if (Output == dm_hdmi) {
if (Format == dm_420) {
if (DecimalBPP >= 18)
return 18;
else if (DecimalBPP >= 15)
return 15;
else if (DecimalBPP >= 12)
return 12;
else
return BPP_INVALID;
} else if (Format == dm_444) {
if (DecimalBPP >= 36)
return 36;
else if (DecimalBPP >= 30)
return 30;
else if (DecimalBPP >= 24)
return 24;
else if (DecimalBPP >= 18)
return 18;
else
return BPP_INVALID;
} else {
if (DecimalBPP / 1.5 >= 24)
return 24;
else if (DecimalBPP / 1.5 >= 20)
return 20;
else if (DecimalBPP / 1.5 >= 16)
return 16;
else
return BPP_INVALID;
}
} else {
if (DSCEnabled) {
if (Format == dm_420) {
if (DecimalBPP < 6)
return BPP_INVALID;
else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
return 1.5 * DSCInputBitPerComponent - 1 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
} else if (Format == dm_n422) {
if (DecimalBPP < 7)
return BPP_INVALID;
else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
return 2 * DSCInputBitPerComponent - 1 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
} else {
if (DecimalBPP < 8)
return BPP_INVALID;
else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
return 3 * DSCInputBitPerComponent - 1 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
}
} else if (Format == dm_420) {
if (DecimalBPP >= 18)
return 18;
else if (DecimalBPP >= 15)
return 15;
else if (DecimalBPP >= 12)
return 12;
else
return BPP_INVALID;
} else if (Format == dm_s422 || Format == dm_n422) {
if (DecimalBPP >= 24)
return 24;
else if (DecimalBPP >= 20)
return 20;
else if (DecimalBPP >= 16)
return 16;
else
return BPP_INVALID;
} else {
if (DecimalBPP >= 36)
return 36;
else if (DecimalBPP >= 30)
return 30;
else if (DecimalBPP >= 24)
return 24;
else if (DecimalBPP >= 18)
return 18;
else
return BPP_INVALID;
}
}
}
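/*
 * Descriptive note (added): DML 2.0 mode-support and system-configuration
 * evaluation. For every voltage state and both DPP-split options, run the
 * support checks (scaling and taps, source format, bandwidth, writeback,
 * DISPCLK/DPPCLK, viewport, OTG, DIO/DSC, urgent latency and prefetch) and
 * record the per-state results in mode_lib->vba.
 */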
void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
int i;
unsigned int j, k, m;
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
/*Scale Ratio, taps Support Check*/
mode_lib->vba.ScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.ScalerEnabled[k] == false
&& ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
|| mode_lib->vba.HRatio[k] != 1.0
|| mode_lib->vba.htaps[k] != 1.0
|| mode_lib->vba.VRatio[k] != 1.0
|| mode_lib->vba.vtaps[k] != 1.0)) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
} else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
|| mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
|| (mode_lib->vba.htaps[k] > 1.0
&& (mode_lib->vba.htaps[k] % 2) == 1)
|| mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
|| mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
|| mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
|| mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
|| (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
&& (mode_lib->vba.HRatio[k] / 2.0
> mode_lib->vba.HTAPsChroma[k]
|| mode_lib->vba.VRatio[k] / 2.0
> mode_lib->vba.VTAPsChroma[k]))) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
mode_lib->vba.SourceFormatPixelAndScanSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
&& mode_lib->vba.SourceScan[k] != dm_horz)
|| ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
|| (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
&& (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
== dm_444_32)
&& mode_lib->vba.SourceScan[k]
== dm_horz
&& mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
== true
&& mode_lib->vba.DCCEnable[k]
== false))
|| (mode_lib->vba.DCCEnable[k] == true
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_linear
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10)))) {
mode_lib->vba.SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
locals->BytePerPixelInDETY[k] = 8.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
locals->BytePerPixelInDETY[k] = 4.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
locals->BytePerPixelInDETY[k] = 2.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 2.0;
} else {
locals->BytePerPixelInDETY[k] = 4.0 / 3;
locals->BytePerPixelInDETC[k] = 8.0 / 3;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
} else {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0;
locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 4.0;
} else if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 3.0;
} else if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 1.5;
} else {
locals->WriteBandwidth[k] = 0.0;
}
}
mode_lib->vba.DCCEnabledInAnyPlane = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
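/*
 * Per-state return bandwidth: the smaller of the return-bus and fabric/DRAM
 * bandwidth, scaled by the percent-of-ideal factor and, when DCC is enabled on
 * any plane, further derated using the ROB and pixel-chunk sizes together with
 * the urgent latency.
 */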
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000;
locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i],
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
* locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
}
locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
}
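/* Re-apply the DCC derates against the raw DCN return bandwidth (without the percent-of-ideal factor). */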
locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth *
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
* locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
}
locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
}
}
/*Writeback Latency support check*/
mode_lib->vba.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
if (locals->WriteBandwidth[k]
> (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
} else {
if (locals->WriteBandwidth[k]
> 1.5
* dml_min(
mode_lib->vba.WritebackInterfaceLumaBufferSize,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
}
}
}
/*Re-ordering Buffer Support Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
locals->ROBSupport[i][0] = true;
} else {
locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
mode_lib->vba.TotalNumberOfActiveWriteback = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0)
mode_lib->vba.ActiveWritebacksPerPlane[k] = 1;
mode_lib->vba.TotalNumberOfActiveWriteback =
mode_lib->vba.TotalNumberOfActiveWriteback
+ mode_lib->vba.ActiveWritebacksPerPlane[k];
}
}
mode_lib->vba.WritebackModeSupport = true;
if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
mode_lib->vba.WritebackModeSupport = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.Writeback10bpc420Supported != true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
mode_lib->vba.WritebackModeSupport = false;
}
}
/*Writeback Scale Ratio and Taps Support Check*/
mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
&& (mode_lib->vba.WritebackHRatio[k] != 1.0
|| mode_lib->vba.WritebackVRatio[k] != 1.0)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackMaxVSCLRatio
|| mode_lib->vba.WritebackHRatio[k]
< mode_lib->vba.WritebackMinHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
< mode_lib->vba.WritebackMinVSCLRatio
|| mode_lib->vba.WritebackLumaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackLumaHTaps[k]
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackLumaVTaps[k]
|| (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
== 1))
|| (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
&& (mode_lib->vba.WritebackChromaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| 2.0
* mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackChromaHTaps[k]
|| 2.0
* mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackChromaVTaps[k]
|| (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
mode_lib->vba.WritebackLumaVExtra =
dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
} else {
mode_lib->vba.WritebackLumaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
&& mode_lib->vba.WritebackLumaVTaps[k]
> (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ mode_lib->vba.WritebackLineBufferChromaBufferSize)
/ 3.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
mode_lib->vba.WritebackChromaVExtra = 0.0;
} else {
mode_lib->vba.WritebackChromaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackRequiredDISPCLK =
dml_max(
mode_lib->vba.WritebackRequiredDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
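/*
 * Minimum DPPCLK per plane when a single DPP is used: bounded by the vertical
 * taps, the luma and chroma scaler (PSCL) throughput, and forced to at least
 * 2x the pixel clock when more than 6 taps are used in any direction.
 */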
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.HRatio[k] > 1.0) {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
if (locals->BytePerPixelInDETC[k] == 0.0) {
locals->PSCL_FACTOR_CHROMA[k] = 0.0;
locals->MinDPPCLKUsingSingleDPP[k] =
mode_lib->vba.PixelClock[k]
* dml_max3(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_FACTOR[k],
1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
} else {
if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
locals->PSCL_FACTOR_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2.0
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
locals->MinDPPCLKUsingSingleDPP[k] =
mode_lib->vba.PixelClock[k]
* dml_max5(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_FACTOR[k],
mode_lib->vba.VTAPsChroma[k]
/ 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]
/ 2.0),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ 4.0
/ locals->PSCL_FACTOR_CHROMA[k],
1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
|| mode_lib->vba.HTAPsChroma[k] > 6.0
|| mode_lib->vba.VTAPsChroma[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
}
}
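/*
 * Swath geometry per plane: 256B request block sizes, the resulting minimum
 * and maximum swath heights, and the maximum swath width allowed by the DET
 * buffer and the line buffer.
 */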
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
&locals->Read256BlockHeightY[k],
&locals->Read256BlockHeightC[k],
&locals->Read256BlockWidthY[k],
&locals->Read256BlockWidthC[k]);
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k];
} else {
locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
}
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]
/ 2.0;
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
}
}
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
} else {
mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
}
mode_lib->vba.MaximumSwathWidthInDETBuffer =
dml_min(
mode_lib->vba.MaximumSwathWidthSupport,
mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0
/ (locals->BytePerPixelInDETY[k]
* locals->MinSwathHeightY[k]
+ locals->BytePerPixelInDETC[k]
/ 2.0
* locals->MinSwathHeightC[k]));
if (locals->BytePerPixelInDETC[k] == 0.0) {
mode_lib->vba.MaximumSwathWidthInLineBuffer =
mode_lib->vba.LineBufferSize
* dml_max(mode_lib->vba.HRatio[k], 1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.vtaps[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k],
1.0)
- 2,
0.0));
} else {
mode_lib->vba.MaximumSwathWidthInLineBuffer =
dml_min(
mode_lib->vba.LineBufferSize
* dml_max(
mode_lib->vba.HRatio[k],
1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.vtaps[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k],
1.0)
- 2,
0.0)),
2.0 * mode_lib->vba.LineBufferSize
* dml_max(
mode_lib->vba.HRatio[k]
/ 2.0,
1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.VTAPsChroma[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k]
/ 2.0,
1.0)
- 2,
0.0)));
}
locals->MaximumSwathWidth[k] = dml_min(
mode_lib->vba.MaximumSwathWidthInDETBuffer,
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
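/*
 * For each state and DPP-split option: decide ODM combine per plane, pick the
 * DPP count and required DPPCLK, split the highest-bandwidth plane while spare
 * DPPs remain in the second (j == 1) configuration, and compare the required
 * clocks (including the writeback DISPCLK) against the DFS-rounded maximums.
 */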
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDppclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
|| (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
if (j == 1) {
while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP
&& locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) {
double BWOfNonSplitPlaneOfMaximumBandwidth;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
BWOfNonSplitPlaneOfMaximumBandwidth = 0;
NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) {
BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1;
}
}
if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) {
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
if (i != mode_lib->vba.soc.num_states) {
mode_lib->vba.PlaneRequiredDISPCLK =
mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
} else {
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
|| mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.WritebackRequiredDISPCLK);
if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
< mode_lib->vba.WritebackRequiredDISPCLK) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP)
locals->TotalAvailablePipesSupport[i][j] = true;
else
locals->TotalAvailablePipesSupport[i][j] = false;
}
}
/*Total Available OTG Support Check*/
mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ 1.0;
}
}
if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
mode_lib->vba.NumberOfOTGSupport = true;
} else {
mode_lib->vba.NumberOfOTGSupport = false;
}
/*Display IO and DSC Support Check*/
mode_lib->vba.NonsupportedDSCInputBPC = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiresDSC[i][k] = 0;
locals->RequiresFEC[i][k] = 0;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_hdmi) {
locals->RequiresDSC[i][k] = 0;
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
} else if (mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp) {
if (mode_lib->vba.Output[k] == dm_edp) {
mode_lib->vba.EffectiveFECOverhead = 0.0;
} else {
mode_lib->vba.EffectiveFECOverhead =
mode_lib->vba.FECOverhead;
}
if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID
&& mode_lib->vba.PHYCLKPerState[i]
>= 810.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] =
mode_lib->vba.Outbpp;
}
}
} else {
locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
}
}
}
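/*
 * A state fails DIO support if any stream has no valid output bpp, or outputs
 * interlaced 4:2:0 while the progressive-to-interlace unit sits in the OPP
 * (unless the DIO check is explicitly skipped for that plane).
 */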
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
}
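/*
 * DSCCLK check: the required DSCCLK is the back-end pixel clock divided by 3
 * (by 6 with ODM combine, since the stream is split across two engines) and by
 * DSCFormatFactor (2 for 4:2:0 and native 4:2:2); states whose down-spread
 * MaxDSCCLK cannot keep up are flagged.
 */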
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->DSCCLKRequiredMoreThanSupported[i] = false;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if ((mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp)) {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k]
== dm_n422) {
mode_lib->vba.DSCFormatFactor = 2;
} else {
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
true;
}
} else {
if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
true;
}
}
}
}
}
}
}
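/* Count the DSC engines each state needs (two per stream with ODM combine) and flag states that exceed the available units. */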
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->NotEnoughDSCUnits[i] = false;
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 1.0;
}
}
}
if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
locals->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] != k) {
mode_lib->vba.slices = 0;
} else if (locals->RequiresDSC[i][k] == 0
|| locals->RequiresDSC[i][k] == false) {
mode_lib->vba.slices = 0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
mode_lib->vba.slices = dml_ceil(
mode_lib->vba.PixelClockBackEnd[k] / 400.0,
4.0);
} else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
mode_lib->vba.slices = 8.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
mode_lib->vba.slices = 4.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
mode_lib->vba.slices = 2.0;
} else {
mode_lib->vba.slices = 1.0;
}
if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE
|| locals->OutputBppPerState[i][k] == BPP_INVALID) {
mode_lib->vba.bpp = 0.0;
} else {
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(
mode_lib->vba.HActive[k]
/ mode_lib->vba.slices,
1.0),
mode_lib->vba.slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
locals->DSCDelayPerState[i][k] =
2.0 * (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0),
mode_lib->vba.slices / 2,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(mode_lib->vba.OutputFormat[k]));
}
locals->DSCDelayPerState[i][k] =
locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k];
} else {
locals->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true)
locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m];
}
}
}
}
//Prefetch Check: swath and DET configuration, urgent latency support per state
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
locals->SwathWidthGranularityY = 256 / dml_ceil(locals->BytePerPixelInDETY[k], 1) / locals->MaxSwathHeightY[k];
locals->RoundedUpMaxSwathSizeBytesY = (dml_ceil(locals->SwathWidthYPerState[i][j][k] - 1, locals->SwathWidthGranularityY)
+ locals->SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k];
if (locals->SourcePixelFormat[k] == dm_420_10) {
locals->RoundedUpMaxSwathSizeBytesY = dml_ceil(locals->RoundedUpMaxSwathSizeBytesY, 256) + 256;
}
if (locals->MaxSwathHeightC[k] > 0) {
locals->SwathWidthGranularityC = 256 / dml_ceil(locals->BytePerPixelInDETC[k], 2) / locals->MaxSwathHeightC[k];
locals->RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYPerState[i][j][k] / 2 - 1, locals->SwathWidthGranularityC)
+ locals->SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k];
}
if (locals->SourcePixelFormat[k] == dm_420_10) {
locals->RoundedUpMaxSwathSizeBytesC = dml_ceil(locals->RoundedUpMaxSwathSizeBytesC, 256) + 256;
} else {
locals->RoundedUpMaxSwathSizeBytesC = 0;
}
if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) {
locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k];
} else {
locals->SwathHeightYPerState[i][j][k] = locals->MinSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MinSwathHeightC[k];
}
if (locals->BytePerPixelInDETC[k] == 0) {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = 0;
} else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETY[k] /
locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
} else {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
}
locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines,
dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k]
/ dml_max(locals->HRatio[k], 1)), 1)) - (locals->vtaps[k] - 1);
locals->EffectiveLBLatencyHidingSourceLinesChroma = dml_min(locals->MaxLineBufferLines,
dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k]
/ (locals->SwathWidthYPerState[i][j][k] / 2
/ dml_max(locals->HRatio[k] / 2, 1)), 1)) - (locals->VTAPsChroma[k] - 1);
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
if (locals->LinesInDETChroma) {
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma +
dml_min(locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] *
locals->BytePerPixelInDETC[k] *
locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
} else {
locals->EffectiveDETLBLinesChroma = 0;
}
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
}
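/* A state/split combination supports the urgent latency only if every active plane can hide at least that much latency. */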
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->UrgentLatencySupport[i][j] = true;
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
if (locals->UrgentLatencySupportUsPerState[i][j][k] < locals->UrgentLatency)
locals->UrgentLatencySupport[i][j] = false;
}
}
}
/*Prefetch Check*/
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
if (locals->DCCEnable[k] == true) {
locals->TotalNumberOfDCCActiveDPP[i][j] =
locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
}
}
}
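/*
 * Per-state, per-split prefetch evaluation: derive the deep-sleep DCFCLK
 * floor, the VM/PTE and meta row bytes and prefetch source lines for each
 * plane, the extra latency, writeback delays, cursor bandwidth and maximum
 * vstartup, then step through the prefetch modes starting from the minimum,
 * building a prefetch schedule for each plane.
 */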
CalculateMinAndMaxPrefetchMode(locals->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &locals->MinPrefetchMode, &locals->MaxPrefetchMode);
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 64.0
* mode_lib->vba.HRatio[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 64.0
* mode_lib->vba.PSCL_FACTOR[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 32.0
* mode_lib->vba.HRatio[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 32.0
* mode_lib->vba.PSCL_FACTOR[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
2.0)
/ 32.0
* mode_lib->vba.HRatio[k]
/ 2.0
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
2.0)
/ 32.0
* mode_lib->vba.PSCL_FACTOR_CHROMA[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.Read256BlockHeightY[k],
mode_lib->vba.Read256BlockWidthY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&mode_lib->vba.MacroTileWidthY[k],
&mode_lib->vba.MetaRowBytesY,
&mode_lib->vba.DPTEBytesPerRowY,
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightYPerState[i][j][k],
mode_lib->vba.ViewportYStartY[k],
&mode_lib->vba.PrefillY[k],
&mode_lib->vba.MaxNumSwY[k]);
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.Read256BlockHeightY[k],
mode_lib->vba.Read256BlockWidthY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2.0,
mode_lib->vba.ViewportHeight[k] / 2.0,
mode_lib->vba.SwathWidthYPerState[i][j][k] / 2.0,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchC[k],
0.0,
&mode_lib->vba.MacroTileWidthC[k],
&mode_lib->vba.MetaRowBytesC,
&mode_lib->vba.DPTEBytesPerRowC,
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightCPerState[i][j][k],
mode_lib->vba.ViewportYStartC[k],
&mode_lib->vba.PrefillC[k],
&mode_lib->vba.MaxNumSwC[k]);
} else {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.MetaRowBytesY,
mode_lib->vba.MetaRowBytesC,
mode_lib->vba.meta_row_height[k],
mode_lib->vba.meta_row_height_chroma[k],
mode_lib->vba.DPTEBytesPerRowY,
mode_lib->vba.DPTEBytesPerRowC,
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_bw[k],
&mode_lib->vba.dpte_row_bw[k],
&mode_lib->vba.qual_row_bw[k]);
}
mode_lib->vba.ExtraLatency =
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ (mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PixelChunkSizeInKByte
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
/ mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
/ mode_lib->vba.ReturnBWPerState[i][0];
}
mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j];
} else {
locals->WritebackDelay[i][k] = 0.0;
}
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[m] == k
&& mode_lib->vba.WritebackEnable[m]
== true) {
locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k],
mode_lib->vba.WritebackLatency + CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[m],
mode_lib->vba.WritebackHRatio[m],
mode_lib->vba.WritebackVRatio[m],
mode_lib->vba.WritebackLumaHTaps[m],
mode_lib->vba.WritebackLumaVTaps[m],
mode_lib->vba.WritebackChromaHTaps[m],
mode_lib->vba.WritebackChromaVTaps[m],
mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[k] == m) {
locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m];
}
}
}
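/* Cursor read bandwidth per plane: bytes per cursor line (CursorWidth * CursorBPP / 8)
 * times the number of cursors, delivered over one line time (HTotal / PixelClock)
 * and scaled by VRatio.
 */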
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m < locals->NumberOfCursors[k]; m++)
locals->cursor_bw[k] = locals->NumberOfCursors[k] * locals->CursorWidth[k][m] * locals->CursorBPP[k][m]
/ 8 / (locals->HTotal[k] / locals->PixelClock[k]) * locals->VRatio[k];
}
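/* Latest allowed VSTARTUP: the vblank length in lines minus the writeback delay
 * converted to lines (with at least one line of margin).
 */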
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
do {
mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
mode_lib->vba.TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[i][j],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
locals->SwathWidthYPerState[i][j][k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TimeCalc,
mode_lib->vba.TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
}
mode_lib->vba.IsErrorResult[i][j][k] =
CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.DSCDelayPerState[i][k],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.ScalerEnabled[k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal,
mode_lib->vba.DPPCLKDelaySCL,
mode_lib->vba.DPPCLKDelaySCLLBOnly,
mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelayCNVCCursor,
mode_lib->vba.DISPCLKDelaySubtotal,
mode_lib->vba.SwathWidthYPerState[i][j][k]
/ mode_lib->vba.HRatio[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
mode_lib->vba.MetaRowBytes[0][0][k],
mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
mode_lib->vba.SwathHeightYPerState[i][j][k],
mode_lib->vba.SwathHeightCPerState[i][j][k],
mode_lib->vba.TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.DSTXAfterScaler,
mode_lib->vba.DSTYAfterScaler,
&mode_lib->vba.LineTimesForPrefetch[k],
&mode_lib->vba.PrefetchBW[k],
&mode_lib->vba.LinesForMetaPTE[k],
&mode_lib->vba.LinesForMetaAndDPTERow[k],
&mode_lib->vba.VRatioPreY[i][j][k],
&mode_lib->vba.VRatioPreC[i][j][k],
&mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k],
&mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
&mode_lib->vba.Tno_bw[k],
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
}
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
locals->prefetch_row_bw_valid = false;
}
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ mode_lib->vba.cursor_bw[k] + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k];
mode_lib->vba.MaximumReadBandwidthWithPrefetch =
mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ mode_lib->vba.cursor_bw[k]
+ dml_max3(
mode_lib->vba.prefetch_vm_bw[k],
mode_lib->vba.prefetch_row_bw[k],
dml_max(mode_lib->vba.ReadBandwidth[k],
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
locals->BandwidthWithoutPrefetchSupported[i][0] = true;
if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->LineTimesForPrefetch[k] < 2.0
|| locals->LinesForMetaPTE[k] >= 8.0
|| locals->LinesForMetaAndDPTERow[k] >= 16.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->PrefetchSupported[i][j] = false;
}
}
locals->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->VRatioPreY[i][j][k] > 4.0
|| locals->VRatioPreC[i][j][k] > 4.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->VRatioInPrefetchSupported[i][j] = false;
}
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode);
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
- mode_lib->vba.cursor_bw[k]
- dml_max(
mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.qual_row_bw[k],
mode_lib->vba.PrefetchBW[k]);
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ mode_lib->vba.MetaRowBytes[0][0][k]
+ mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ mode_lib->vba.ImmediateFlipBytes[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.ExtraLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.ImmediateFlipBytes[k],
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
mode_lib->vba.MetaRowBytes[0][0][k],
mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
mode_lib->vba.qual_row_bw[k],
&mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
&mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
&mode_lib->vba.final_flip_bw[k],
&mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
}
mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.total_dcn_read_bw_with_flip =
mode_lib->vba.total_dcn_read_bw_with_flip
+ mode_lib->vba.cursor_bw[k]
+ dml_max3(
mode_lib->vba.prefetch_vm_bw[k],
mode_lib->vba.prefetch_row_bw[k],
mode_lib->vba.final_flip_bw[k]
+ dml_max(
mode_lib->vba.ReadBandwidth[k],
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]));
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
> mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
}
}
/*Vertical Active BW support*/
mode_lib->vba.MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->PTEBufferSizeNotExceededY[i][j][k] == false
|| locals->PTEBufferSizeNotExceededC[i][j][k] == false) {
locals->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
mode_lib->vba.CursorSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (j = 0; j < 2; j++) {
if (mode_lib->vba.CursorWidth[k][j] > 0.0) {
if (dml_floor(
dml_floor(
mode_lib->vba.CursorBufferSize
- mode_lib->vba.CursorChunkSize,
mode_lib->vba.CursorChunkSize) * 1024.0
/ (mode_lib->vba.CursorWidth[k][j]
* mode_lib->vba.CursorBPP[k][j]
/ 8.0),
1.0)
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatencyPixelDataOnly
|| (mode_lib->vba.CursorBPP[k][j] == 64.0
&& mode_lib->vba.Cursor64BppSupport == false)) {
mode_lib->vba.CursorSupport = false;
}
}
}
}
/*Valid Pitch Check*/
mode_lib->vba.PitchSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->AlignedYPitch[k] = dml_ceil(
dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
locals->MacroTileWidthY[k]);
if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.DCCEnable[k] == true) {
locals->AlignedDCCMetaPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.DCCMetaPitchY[k],
mode_lib->vba.ViewportWidth[k]),
64.0 * locals->Read256BlockWidthY[k]);
} else {
locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
}
if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
locals->AlignedCPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.PitchC[k],
mode_lib->vba.ViewportWidth[k] / 2.0),
locals->MacroTileWidthC[k]);
} else {
locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k];
}
if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
mode_lib->vba.PitchSupport = false;
}
}
/*Mode Support, Voltage State and SOC Configuration*/
for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
} else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
} else if (locals->NotEnoughDSCUnits[i] != false) {
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
} else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
} else if (locals->TotalAvailablePipesSupport[i][j] != true) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
} else if (mode_lib->vba.NumberOfOTGSupport != true) {
status = DML_FAIL_NUM_OTG;
} else if (mode_lib->vba.WritebackModeSupport != true) {
status = DML_FAIL_WRITEBACK_MODE;
} else if (mode_lib->vba.WritebackLatencySupport != true) {
status = DML_FAIL_WRITEBACK_LATENCY;
} else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
} else if (mode_lib->vba.CursorSupport != true) {
status = DML_FAIL_CURSOR_SUPPORT;
} else if (mode_lib->vba.PitchSupport != true) {
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
status = DML_FAIL_PTE_BUFFER_SIZE;
} else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
status = DML_FAIL_DSC_INPUT_BPC;
}
if (status == DML_VALIDATION_OK) {
locals->ModeSupport[i][j] = true;
} else {
locals->ModeSupport[i][j] = false;
}
locals->ValidationStatus[i] = status;
}
}
{
unsigned int MaximumMPCCombine = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1;
for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) {
if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false
|| mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible)) {
MaximumMPCCombine = 1;
} else {
MaximumMPCCombine = 0;
}
break;
}
}
mode_lib->vba.ImmediateFlipSupport =
locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
}
mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.maxMpcComb = MaximumMPCCombine;
}
mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
mode_lib->vba.ODMCombineEnabled[k] = 0;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.OutputBpp[k] =
locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k];
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "display_rq_dlg_calc_20v2.h"
// Function: dml20v2_rq_dlg_get_rq_params
// Calculate requestor related parameters that are register definition agnostic
// (i.e. this layer tries to separate real values from the register definition)
// Input:
// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
// Output:
// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
//
static void dml20v2_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st * rq_param,
const display_pipe_source_params_st *pipe_src_param);
// Function: dml20v2_rq_dlg_get_dlg_params
// Calculate deadline related parameters
//
static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp);
#include "../dml_inline_defs.h"
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
if (source_format == dm_444_16) {
if (!is_chroma)
ret_val = 2;
} else if (source_format == dm_444_32) {
if (!is_chroma)
ret_val = 4;
} else if (source_format == dm_444_64) {
if (!is_chroma)
ret_val = 8;
} else if (source_format == dm_420_8) {
if (is_chroma)
ret_val = 2;
else
ret_val = 1;
} else if (source_format == dm_420_10) {
if (is_chroma)
ret_val = 4;
else
ret_val = 2;
} else if (source_format == dm_444_8) {
ret_val = 1;
}
return ret_val;
}
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
ret_val = true;
return ret_val;
}
static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
bool odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double) refclk_freq_in_mhz
* dml_min((double) recout_width, (double) hactive / 2.0)
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
else
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
/ (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
return refcyc_per_delivery;
}
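// Example with hypothetical numbers (not taken from any real configuration):
// refclk 400MHz, pclk 200MHz, recout_width 1920 and req_per_swath_ub 120 with
// vratio <= 1 and no ODM combine give 400 * 1920 / 200 / 120 = 32 refclk
// cycles per request delivery.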
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes
/ (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
256,
0) / 64.0; // 2/3 to chroma
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
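// Example with a hypothetical 64KB detile buffer split in half: plane1 (chroma)
// would start at 64 * 1024 / 2 / 64 = 512, i.e. the base address is expressed
// in 64-byte units.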
}
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
256,
1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
256,
1) + 256;
}
if (rq_param->yuv420) {
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = false;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
req128_l = true;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
// Note: the assumption is that the config passed in will fit into
// the detiled buffer.
} else {
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
req128_l = false;
else
req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
}
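// Example with hypothetical sizes: a non-yuv420 surface with
// full_swath_bytes_packed_l = 24KB and a 64KB detile buffer gives
// total_swath_bytes = 48KB, which fits, so full 256B requests are kept
// (req128_l = false) and all 48KB is stored for luma.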
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
__func__,
full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
__func__,
full_swath_bytes_packed_c);
}
static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int is_chroma)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element;
unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
false);
unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
true);
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_bytes;
unsigned int meta_blk_height;
unsigned int meta_blk_width;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
const unsigned int pde_proc_buffer_size_64k_reqs =
mode_lib->ip.pde_proc_buffer_size_64k_reqs;
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
unsigned int pde_buf_entries;
bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
Calculate256BBlockSizes((enum source_format_class)(source_format),
(enum dm_swizzle_mode)(tiling),
bytes_per_element_y,
bytes_per_element_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
// remember the log rules:
// "+" in log is multiply
// "-" in log is divide
// "/2" is like square root
// blk is vertically biased
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
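// Worked example with hypothetical values: a 64KB tile (log2_blk_bytes = 16) of
// 32bpp data (log2_bytes_per_element = 2) whose 256B block is 16x4 elements
// (log2_blk256_height = 2) gives log2_blk_height = 2 + ceil((16 - 8) / 2) = 6
// and log2_blk_width = 16 - 2 - 6 = 8, i.e. a 256x64 element block
// (256 * 64 * 4B = 64KB), consistent with the log rules above.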
if (!surf_vert) {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ blk256_width;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
} else {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ blk256_height;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
* bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
* bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
log2_meta_req_bytes = 6; // meta request is 64b and is an 8x8 byte meta element
// each 64b meta request for dcn is 8x8 meta elements and
// a meta element covers one 256b block of the data surface.
log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 bytes, each byte represents 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
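// Worked example with hypothetical values: for 32bpp (log2_bytes_per_element = 2,
// log2_blk256_height = 2) one 64B meta request covers meta_req_height = 1 << 5 = 32
// lines and meta_req_width = 1 << (6 + 8 - 2 - 5) = 128 elements, i.e. 8x8 meta
// bytes with each byte covering one 16x4 element 256B block.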
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1
<< (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_bytes = 4096;
meta_blk_height = blk256_height * 64;
meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
meta_surface_bytes = meta_pitch
* (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
* bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
8 * vmpg_bytes,
1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
__func__,
meta_pte_req_per_frame_ub);
dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
__func__,
meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear) {
log2_vmpg_height = 0; // one line high
} else {
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
}
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
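// Example reading of the cases above: a tile block of 64KB or more
// (log2_blk_bytes >= 16) with a 4KB VM page (log2_vmpg_bytes == 12) takes the
// 2x4 PTE branch, so two 64B requests return 16 PTEs and the request covers a
// (blk256_width << 4) x (blk256_height << 4) element region.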
// The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
// log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents.
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4).
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
if (surf_linear) {
unsigned int dpte_row_height;
log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
/ bytes_per_element,
dpte_buf_in_pte_reqs
* dpte_req_width)
/ data_pitch),
1);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
dpte_req_width,
1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
// the upper bound of the dpte_row_width without dependency on viewport position follows.
// for tiled mode, the row height is the same as the req height and the row stores up to the vp size upper bound
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
rq_sizing_param->dpte_group_bytes = 2048;
//since the pte request size is 64 bytes, the number of data pte requests per full sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
1);
}
static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
// TODO check if ppe applies to both luma and chroma in the 422 case
if (is_chroma) {
vp_width = pipe_src_param->viewport_width_c / ppe;
vp_height = pipe_src_param->viewport_height_c;
data_pitch = pipe_src_param->data_pitch_c;
meta_pitch = pipe_src_param->meta_pitch_c;
} else {
vp_width = pipe_src_param->viewport_width / ppe;
vp_height = pipe_src_param->viewport_height;
data_pitch = pipe_src_param->data_pitch;
meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_src_param->source_format,
pipe_src_param->sw_mode,
pipe_src_param->macro_tile_size,
pipe_src_param->source_scan,
is_chroma);
}
static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
|| pipe_src_param->source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
&(rq_param->dlg.rq_l),
&(rq_param->misc.rq_l),
pipe_src_param,
0);
if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
&(rq_param->dlg.rq_c),
&(rq_param->misc.rq_c),
pipe_src_param,
1);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
print__rq_params_st(mode_lib, rq_param);
}
void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
// It would be nice to decouple the code from the hw register implementation and extract the code that is repeated for luma and chroma.
static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double dispclk_freq_in_mhz = clks->dispclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_dcfclk_mhz;
double t_calc_us;
double min_ttu_vblank;
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
// Scaling
unsigned int htaps_l;
unsigned int htaps_c;
double hratio_l;
double hratio_c;
double vratio_l;
double vratio_c;
bool scl_enable;
double line_time_in_us;
// double vinit_l;
// double vinit_c;
// double vinit_bot_l;
// double vinit_bot_c;
// unsigned int swath_height_l;
unsigned int swath_width_ub_l;
// unsigned int dpte_bytes_per_row_ub_l;
unsigned int dpte_groups_per_row_ub_l;
// unsigned int meta_pte_bytes_per_frame_ub_l;
// unsigned int meta_bytes_per_row_ub_l;
// unsigned int swath_height_c;
unsigned int swath_width_ub_c;
// unsigned int dpte_bytes_per_row_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int meta_chunks_per_row_ub_c;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int pixel_rate_delay_subtotal;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double line_wait;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double min_dst_y_per_vm_vblank;
double min_dst_y_per_row_vblank;
double lsw;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int meta_row_height_c;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
double refcyc_per_req_delivery_cur1;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq =
(unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
* dml_pow(2, 8));
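// Example with hypothetical clocks: refclk 400MHz and pclk 600MHz give
// ref_freq_to_pix_freq = 0.667, programmed as 0.667 * 2^19 = ~349525 (19
// fractional bits); refcyc_per_htotal is encoded the same way with 8
// fractional bits.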
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ min_dst_y_ttu_vblank) * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
__func__,
min_dcfclk_mhz);
dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
__func__,
min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
__func__,
min_dst_y_ttu_vblank);
dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
__func__,
t_calc_us);
dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
__func__,
disp_dlg_regs->min_dst_y_next_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
__func__,
ref_freq_to_pix_freq);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
// vinit_l = scl.vinit;
// vinit_c = scl.vinit_c;
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
// dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
if (scl_enable)
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
else
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
if (dout->dsc_enable) {
double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dispclk_delay_subtotal += dsc_delay;
}
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
// TODO: Where is this coming from?
if (interlaced)
vstartup_start = vstartup_start / 2;
// TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
if (vstartup_start >= min_vblank) {
dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
__func__,
vblank_start,
vblank_end);
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
min_vblank = vstartup_start + 1;
dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
}
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
__func__,
pixel_rate_delay_subtotal);
dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
__func__,
dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
__func__,
dst_y_after_scaler);
// Lwait
line_wait = mode_lib->soc.urgent_latency_us;
if (cstate_en)
line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
if (pstate_en)
line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ mode_lib->soc.urgent_latency_us,
line_wait);
line_wait = line_wait / line_time_in_us;
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_per_vm_vblank = 8.0;
min_dst_y_per_row_vblank = 16.0;
// magic!
if (htotal <= 75) {
min_vblank = 300;
min_dst_y_per_vm_vblank = 100.0;
min_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
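/* Effective horizontal throughput factor: 2 pixels/clk with no horizontal taps,
 * bounded by 2 * hratio for up to 6 taps and by hratio beyond that, capped at 4.0.
 * hscale_pixel_rate = factor * DPPCLK is the scaler-limited pixel rate used in
 * the delivery calculations below.
 */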
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (htaps_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// Determine the full recout width across a horizontal split (MPC or ODM combine)
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
__func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
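// refcyc_per_line_delivery_*: REFCLK cycles budgeted to deliver one swath line of
// luma, evaluated at the prefetch rate (vratio_pre) and at the active rate (vratio).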
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n",
__func__,
full_recout_width);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
__func__,
hscale_pixel_rate_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_l);
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_c);
}
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
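// scaler_rec_in_width is the surface extent along the scan direction, i.e. the
// number of input pixels consumed per fetched line; it sizes the per-request
// delivery budget below.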
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
if (src->num_cursors > 0) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp)(src->cur0_bpp));
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
if (src->num_cursors > 1) {
calculate_ttu_cursor(mode_lib,
&refcyc_per_req_delivery_pre_cur1,
&refcyc_per_req_delivery_cur1,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur1_src_width,
(enum cursor_bpp)(src->cur1_bpp));
}
// TTU - Misc
// all hard-coded
// Assignment to register structures
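/* Register packing is fixed point: the dst_y_* values carry 2 fractional bits
 * (multiply by 2^2), vratio_prefetch carries 19 fractional bits and the TTU
 * refcyc_per_req_delivery values carry 10 fractional bits.
 */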
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int) dml_pow(2, 13));
}
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
/ (double) vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int) dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c =
(unsigned int) ((double) dpte_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
(unsigned int) ((double) meta_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
(unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
* dml_pow(2, 10));
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
* ref_freq_to_pix_freq);
/*ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));*/
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
// Get the watermarks and Tex (urgent extra latency).
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20v2_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
&rq_param.dlg,
&dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
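// Convert the request size in bytes to a request width in pixels, round the
// cursor width up to whole requests and derive the number of requests per line.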
cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0) {
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
dml_print("DML_DLG: %s: cur_req_width = %d\n",
__func__,
cur_req_width);
dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
__func__,
cur_width_ub);
dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
__func__,
cur_req_per_width);
dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
__func__,
hactive_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_pre_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "display_mode_vba_20v2.h"
#include "../dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
#define DCN20_MAX_420_IMAGE_WIDTH 4096
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
double ReturnBW,
bool DCCEnabledAnyPlane,
double ReturnBandwidthToDCN);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat);
static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
static bool CalculateDelayAfterScaler(
struct display_mode_lib *mode_lib,
double ReturnBW,
double ReadBandwidthPlaneLuma,
double ReadBandwidthPlaneChroma,
double TotalDataReadBandwidth,
double DisplayPipeLineDeliveryTimeLuma,
double DisplayPipeLineDeliveryTimeChroma,
double DPPCLK,
double DISPCLK,
double PixelClock,
unsigned int DSCDelay,
unsigned int DPPPerPlane,
bool ScalerEnabled,
unsigned int NumberOfCursors,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int HTotal,
unsigned int SwathWidthSingleDPPY,
double BytePerPixelDETY,
double BytePerPixelDETC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler
);
// Super monster function with some 45 arguments
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double DPPCLK,
double DISPCLK,
double PixelClock,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane,
unsigned int NumberOfCursors,
unsigned int VBlank,
unsigned int HTotal,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int PageTableLevels,
bool GPUVMEnable,
bool DynamicMetadataEnable,
unsigned int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatencyPixelDataOnly,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double DSTXAfterScaler,
double DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBW,
double *Tno_bw,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidthY,
bool GPUVMEnable,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequestsLuma,
unsigned int PDEProcessingBufIn64KBReqs,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_height,
unsigned int *meta_row_height);
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatencyPixelDataOnly,
double SREnterPlusExitTime);
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk);
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw,
double *qual_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double UrgentExtraLatency,
double UrgentLatencyPixelDataOnly,
unsigned int GPUVMMaxPageTableLevels,
bool GPUVMEnable,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
unsigned int ImmediateFlipBytes,
double LineTime,
double VRatio,
double Tno_bw,
double PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
double qual_row_bw,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth);
static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib);
void dml20v2_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000.0;
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
dml20v2_DisplayPipeConfiguration(mode_lib);
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
double ReturnBW,
bool DCCEnabledAnyPlane,
double ReturnBandwidthToDCN)
{
double CriticalCompression;
if (DCCEnabledAnyPlane
&& ReturnBandwidthToDCN
> mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
ReturnBW =
dml_min(
ReturnBW,
ReturnBandwidthToDCN * 4
* (1.0
- mode_lib->vba.UrgentLatencyPixelDataOnly
/ ((mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024
/ ReturnBandwidthToDCN
- mode_lib->vba.DCFCLK
* mode_lib->vba.ReturnBusWidth
/ 4)
+ mode_lib->vba.UrgentLatencyPixelDataOnly));
CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
* mode_lib->vba.UrgentLatencyPixelDataOnly
/ (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly
+ (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024);
if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
ReturnBW =
dml_min(
ReturnBW,
4.0 * ReturnBandwidthToDCN
* (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024
* mode_lib->vba.ReturnBusWidth
* mode_lib->vba.DCFCLK
* mode_lib->vba.UrgentLatencyPixelDataOnly
/ dml_pow(
(ReturnBandwidthToDCN
* mode_lib->vba.UrgentLatencyPixelDataOnly
+ (mode_lib->vba.ROBBufferSizeInKByte
- mode_lib->vba.PixelChunkSizeInKByte)
* 1024),
2));
return ReturnBW;
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
Delay, pixels;
if (pixelFormat == dm_n422 || pixelFormat == dm_420)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
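// e.g. bpp = 12.0 at 1 pixel per clock: 8192 / 2 / 12 = 341.33, rounded to 341 cycles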
initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_s422)
s = 1;
else
s = 0;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
p = 3 * wx - w;
l0 = ix / w;
a = ix + p * l0;
ax = (a + 2) / 3 + D + 6 + 1;
l = (ax + wx - 1) / wx;
if ((ix % w) == 0 && p != 0)
lstall = 1;
else
lstall = 0;
Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
static bool CalculateDelayAfterScaler(
struct display_mode_lib *mode_lib,
double ReturnBW,
double ReadBandwidthPlaneLuma,
double ReadBandwidthPlaneChroma,
double TotalDataReadBandwidth,
double DisplayPipeLineDeliveryTimeLuma,
double DisplayPipeLineDeliveryTimeChroma,
double DPPCLK,
double DISPCLK,
double PixelClock,
unsigned int DSCDelay,
unsigned int DPPPerPlane,
bool ScalerEnabled,
unsigned int NumberOfCursors,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int HTotal,
unsigned int SwathWidthSingleDPPY,
double BytePerPixelDETY,
double BytePerPixelDETC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler
)
{
unsigned int DPPCycles, DISPCLKCycles;
double DataFabricLineDeliveryTimeLuma;
double DataFabricLineDeliveryTimeChroma;
double DSTTotalPixelsAfterScaler;
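// Extra last-pixel-of-line watermark: how much longer the data fabric needs to
// deliver a full swath (at this plane's share of the return bandwidth) than the
// display pipe needs to consume the same line.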
DataFabricLineDeliveryTimeLuma = SwathWidthSingleDPPY * SwathHeightY * dml_ceil(BytePerPixelDETY, 1) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneLuma / TotalDataReadBandwidth);
mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeLuma - DisplayPipeLineDeliveryTimeLuma);
if (BytePerPixelDETC != 0) {
DataFabricLineDeliveryTimeChroma = SwathWidthSingleDPPY / 2 * SwathHeightC * dml_ceil(BytePerPixelDETC, 2) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneChroma / TotalDataReadBandwidth);
mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeChroma - DisplayPipeLineDeliveryTimeChroma);
}
if (ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (DPPCLK == 0.0 || DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ DSCDelay;
if (DPPPerPlane > 1)
*DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
if (OutputFormat == dm_420 || (Interlace && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
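// Fold any DSTX overflow beyond one line (HTotal) into DSTY so X stays within a line.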
DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
return true;
}
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double DPPCLK,
double DISPCLK,
double PixelClock,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane,
unsigned int NumberOfCursors,
unsigned int VBlank,
unsigned int HTotal,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int PageTableLevels,
bool GPUVMEnable,
bool DynamicMetadataEnable,
unsigned int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatencyPixelDataOnly,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double DSTXAfterScaler,
double DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBW,
double *Tno_bw,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
double TotalRepeaterDelayTime;
double Tdm, LineTime, Tsetup;
double dst_y_prefetch_equ;
double Tsw_oto;
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
double Tpre_oto;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
*VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
*VUpdateWidthPix = (14.0 / DCFCLKDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
* PixelClock;
*VReadyOffsetPix = dml_max(
150.0 / DPPCLK,
TotalRepeaterDelayTime + 20.0 / DCFCLKDeepSleep + 10.0 / DPPCLK)
* PixelClock;
Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
LineTime = (double) HTotal / PixelClock;
if (DynamicMetadataEnable) {
double Tdmbf, Tdmec, Tdmsks;
Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
Tdmec = LineTime;
if (DynamicMetadataLinesBeforeActiveRequired == 0)
Tdmsks = VBlank * LineTime / 2.0;
else
Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
Tdmsks = Tdmsks / 2;
if (VStartup * LineTime
< Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
MyError = true;
}
} else
Tdm = 0;
if (GPUVMEnable) {
if (PageTableLevels == 4)
*Tno_bw = UrgentExtraLatency + UrgentLatencyPixelDataOnly;
else if (PageTableLevels == 3)
*Tno_bw = UrgentExtraLatency;
else
*Tno_bw = 0;
} else if (DCCEnable)
*Tno_bw = LineTime;
else
*Tno_bw = LineTime / 4;
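/* Lines available for prefetch: VStartup minus the calc/wait (or XFC flip) time,
 * the setup + dynamic metadata time and the post-scaler start offset, all
 * expressed in lines.
 */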
dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
- (Tsetup + Tdm) / LineTime
- (DSTYAfterScaler + DSTXAfterScaler / HTotal);
Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
/ Tsw_oto;
if (GPUVMEnable == true) {
Tvm_oto =
dml_max(
*Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (PageTableLevels
- 1),
LineTime / 4.0));
} else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
Tr0_oto = dml_max(
(MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
dml_max(UrgentLatencyPixelDataOnly, dml_max(LineTime - Tvm_oto, LineTime / 4)));
} else
Tr0_oto = LineTime - Tvm_oto;
Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
dst_y_prefetch_oto = Tpre_oto / LineTime;
if (dst_y_prefetch_oto < dst_y_prefetch_equ)
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
else
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
*DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
/ 4;
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: TCalc: %f\n", TCalc);
dml_print("DML: TWait: %f\n", TWait);
dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: Tsetup: %f\n", Tsetup);
dml_print("DML: Tdm: %f\n", Tdm);
dml_print("DML: DSTYAfterScaler: %f\n", DSTYAfterScaler);
dml_print("DML: DSTXAfterScaler: %f\n", DSTXAfterScaler);
dml_print("DML: HTotal: %d\n", HTotal);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
if (*DestinationLinesForPrefetch > 1) {
*PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow
+ PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ PrefetchSourceLinesC * SwathWidthY / 2
* dml_ceil(BytePerPixelDETC, 2))
/ (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
if (GPUVMEnable) {
TimeForFetchingMetaPTE =
dml_max(
*Tno_bw
+ (double) PDEAndMetaPTEBytesFrame
/ *PrefetchBandwidth,
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (PageTableLevels
- 1),
LineTime / 4));
} else {
if (NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingMetaPTE = LineTime / 4;
else
TimeForFetchingMetaPTE = 0.0;
}
if ((GPUVMEnable == true || DCCEnable == true)) {
TimeForFetchingRowInVBlank =
dml_max(
(MetaRowByte + PixelPTEBytesPerRow)
/ *PrefetchBandwidth,
dml_max(
UrgentLatencyPixelDataOnly,
dml_max(
LineTime
- TimeForFetchingMetaPTE,
LineTime
/ 4.0)));
} else {
if (NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
else
TimeForFetchingRowInVBlank = 0.0;
}
*DestinationLinesToRequestVMInVBlank = dml_floor(
4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
1) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_floor(
4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
1) / 4.0;
LinesToRequestPrefetchPixelData =
*DestinationLinesForPrefetch
- ((NumberOfCursors > 0 || GPUVMEnable
|| DCCEnable) ?
(*DestinationLinesToRequestVMInVBlank
+ *DestinationLinesToRequestRowInVBlank) :
0.0);
if (LinesToRequestPrefetchPixelData > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max(
(double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY
* SwathHeightY
/ (LinesToRequestPrefetchPixelData
- (VInitPreFillY
- 3.0)
/ 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
*VRatioPrefetchY = 0;
}
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
if ((SwathHeightC > 4)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(
*VRatioPrefetchC,
(double) MaxNumSwathC
* SwathHeightC
/ (LinesToRequestPrefetchPixelData
- (VInitPreFillC
- 3.0)
/ 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
*VRatioPrefetchC = 0;
}
}
*RequiredPrefetchPixDataBW =
DPPPerPlane
* ((double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData
* dml_ceil(
BytePerPixelDETY,
1)
+ (double) PrefetchSourceLinesC
/ LinesToRequestPrefetchPixelData
* dml_ceil(
BytePerPixelDETC,
2)
/ 2)
* SwathWidthY / LineTime;
} else {
MyError = true;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
}
} else {
MyError = true;
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
}
return MyError;
}
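/* The DFS can only produce clocks of the form VCOSpeed * 4 / n for integer n, so
 * round to the nearest achievable frequency at or above (Up) / at or below (Down)
 * the request. For example, with VCOSpeed = 3600 MHz and Clock = 1000 MHz:
 * Up yields 14400 / 14 = 1028.6 MHz and Down yields 14400 / 15 = 960 MHz.
 */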
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
}
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
unsigned int MaxPartialSwath;
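/* VInitPreFill is the number of source lines the scaler must pre-fill before
 * producing its first output line: floor((VRatio + vtaps + 1) / 2), with an extra
 * 0.5 * VRatio term inside the floor for interlaced sources when the
 * progressive-to-interlace unit is not in the OPP. MaxNumSwath is the number of
 * swaths covering that pre-fill.
 */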
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!mode_lib->vba.IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
% SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print(
"WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
% SwathHeight;
}
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidth,
bool GPUVMEnable,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequestsLuma,
unsigned int PDEProcessingBufIn64KBReqs,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_height,
unsigned int *meta_row_height)
{
unsigned int MetaRequestHeight;
unsigned int MetaRequestWidth;
unsigned int MetaSurfWidth;
unsigned int MetaSurfHeight;
unsigned int MPDEBytesFrame;
unsigned int MetaPTEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
unsigned int MacroTileHeight;
unsigned int DPDE0BytesFrame;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
if (DCCEnable == true) {
MetaRequestHeight = 8 * BlockHeight256Bytes;
MetaRequestWidth = 8 * BlockWidth256Bytes;
if (ScanDirection == dm_horz) {
*meta_row_height = MetaRequestHeight;
MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ MetaRequestWidth;
*MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = MetaRequestWidth;
MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ MetaRequestHeight;
*MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
}
if (ScanDirection == dm_horz) {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
} else {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(
(double) ViewportHeight - 1,
64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
}
if (GPUVMEnable == true) {
MetaPTEBytesFrame = (dml_ceil(
(double) (DCCMetaSurfaceBytes - VMMPageSize)
/ (8 * VMMPageSize),
1) + 1) * 64;
MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1);
} else {
MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
} else {
MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
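/* Map the surface tiling mode to the macro tile geometry used for the PTE math:
 * linear and the gfx7 "thin" modes use a 256 B tile, 4 KB swizzle modes a 4 KiB
 * tile, 64 KB swizzle modes a 64 KiB tile and everything else a 256 KiB tile,
 * with the tile height scaling off the 256 B block height accordingly.
 */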
if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
|| SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
MacroTileSizeBytes = 4096;
MacroTileHeight = 4 * BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
|| SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
|| SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
|| SurfaceTiling == dm_sw_64kb_r_x) {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 262144;
MacroTileHeight = 32 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) {
if (ScanDirection == dm_horz) {
DPDE0BytesFrame =
64
* (dml_ceil(
((Pitch
* (dml_ceil(
ViewportHeight
- 1,
MacroTileHeight)
+ MacroTileHeight)
* BytePerPixel)
- MacroTileSizeBytes)
/ (8
* 2097152),
1) + 1);
} else {
DPDE0BytesFrame =
64
* (dml_ceil(
((Pitch
* (dml_ceil(
(double) SwathWidth
- 1,
MacroTileHeight)
+ MacroTileHeight)
* BytePerPixel)
- MacroTileSizeBytes)
/ (8
* 2097152),
1) + 1);
}
ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2);
} else {
DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ ExtraDPDEBytesFrame;
if (GPUVMEnable == true) {
unsigned int PTERequestSize;
unsigned int PixelPTEReqHeight;
unsigned int PixelPTEReqWidth;
double FractionOfPTEReturnDrop;
unsigned int EffectivePDEProcessingBufIn64KBReqs;
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeight = 1;
PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeight = MacroTileHeight;
PixelPTEReqWidth = 8 * *MacroTileWidth;
PTERequestSize = 64;
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
FractionOfPTEReturnDrop = 7.0 / 8.0;
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeight = 16 * BlockHeight256Bytes;
PixelPTEReqWidth = 16 * BlockWidth256Bytes;
PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeight = MacroTileHeight;
PixelPTEReqWidth = 8 * *MacroTileWidth;
PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
else
EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height =
dml_min(
128,
1
<< (unsigned int) dml_floor(
dml_log2(
dml_min(
(double) PTEBufferSizeInRequestsLuma
* PixelPTEReqWidth,
EffectivePDEProcessingBufIn64KBReqs
* 65536.0
/ BytePerPixel)
/ Pitch),
1));
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(
(double) (Pitch * *dpte_row_height - 1)
/ PixelPTEReqWidth,
1) + 1);
} else if (ScanDirection == dm_horz) {
*dpte_row_height = PixelPTEReqHeight;
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ 1);
} else {
*dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
*PixelPTEBytesPerRow = PTERequestSize
* (dml_ceil(
((double) SwathWidth - 1)
/ PixelPTEReqHeight,
1) + 1);
}
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
<= 64 * PTEBufferSizeInRequestsLuma) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
} else {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
return PDEAndMetaPTEBytesFrame;
}
static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib)
{
unsigned int j, k;
mode_lib->vba.WritebackDISPCLK = 0.0;
mode_lib->vba.DISPCLKWithRamping = 0;
mode_lib->vba.DISPCLKWithoutRamping = 0;
mode_lib->vba.GlobalDPPCLK = 0.0;
// mode_lib->vba.DISPCLK and mode_lib->vba.DPPCLK calculation
//
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k]) {
mode_lib->vba.WritebackDISPCLK =
dml_max(
mode_lib->vba.WritebackDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
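/* Per-plane single-DPP DPPCLK requirement: pixel clock scaled by the worst of
 * the line buffer write rate (vtaps / 6 * min(1, hratio)), the scaler throughput
 * limit (hratio * vratio / PSCL throughput) and 1:1; more than six taps in
 * either direction forces at least twice the pixel clock.
 */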
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.HRatio[k] > 1) {
mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1));
} else {
mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPLuma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
1.0));
if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPLuma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
mode_lib->vba.DPPCLKUsingSingleDPP[k] =
mode_lib->vba.DPPCLKUsingSingleDPPLuma;
} else {
if (mode_lib->vba.HRatio[k] > 1) {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPChroma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.VTAPsChroma[k]
/ 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]
/ 2),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ 4
/ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
1.0));
if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPChroma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
* mode_lib->vba.PixelClock[k];
}
mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
mode_lib->vba.DPPCLKUsingSingleDPPLuma,
mode_lib->vba.DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] != k)
continue;
if (mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DISPCLKWithRamping =
dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.PixelClock[k] / 2
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100)
* (1
+ mode_lib->vba.DISPCLKRampingMargin
/ 100));
mode_lib->vba.DISPCLKWithoutRamping =
dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.PixelClock[k] / 2
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100));
} else if (!mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DISPCLKWithRamping =
dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.PixelClock[k]
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100)
* (1
+ mode_lib->vba.DISPCLKRampingMargin
/ 100));
mode_lib->vba.DISPCLKWithoutRamping =
dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.PixelClock[k]
* (1
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100));
}
}
mode_lib->vba.DISPCLKWithRamping = dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.WritebackDISPCLK);
mode_lib->vba.DISPCLKWithoutRamping = dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.WritebackDISPCLK);
ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states].dispclk_mhz,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
} else {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
}
DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DPPPerPlane[k] == 0) {
mode_lib->vba.DPPCLK_calculated[k] = 0;
} else {
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
/ mode_lib->vba.DPPPerPlane[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
mode_lib->vba.GlobalDPPCLK = dml_max(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DPPCLK_calculated[k]);
}
mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
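// Quantize each plane's DPPCLK up to the next 1/255 step of the global DPPCLK
// (the per-plane clock is derived from the global one in 1/255 steps).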
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
* dml_ceil(
mode_lib->vba.DPPCLK_calculated[k] * 255
/ mode_lib->vba.GlobalDPPCLK,
1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
}
// Urgent Watermark
mode_lib->vba.DCCEnabledAnyPlane = false;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.DCCEnabledAnyPlane = true;
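// Ideal return bandwidth into DCN: the smaller of the SDP port bandwidth
// (ReturnBusWidth * DCFCLK) and the fabric/DRAM bandwidth, derated by the pixel-data-only
// efficiency percentage and then passed through adjust_ReturnBW together with the
// DCC-enabled-on-any-plane flag.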
mode_lib->vba.ReturnBandwidthToDCN = dml_min(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
mode_lib->vba.FabricAndDRAMBandwidth * 1000)
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
mode_lib->vba.ReturnBW = adjust_ReturnBW(
mode_lib,
mode_lib->vba.ReturnBW,
mode_lib->vba.DCCEnabledAnyPlane,
mode_lib->vba.ReturnBandwidthToDCN);
// Recompute the raw min(SDP, DRAM) bandwidth, this time without the pixel-data-only derate,
// and run adjust_ReturnBW over it a second time.
mode_lib->vba.ReturnBandwidthToDCN = dml_min(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
mode_lib->vba.FabricAndDRAMBandwidth * 1000);
mode_lib->vba.ReturnBW = adjust_ReturnBW(
mode_lib,
mode_lib->vba.ReturnBW,
mode_lib->vba.DCCEnabledAnyPlane,
mode_lib->vba.ReturnBandwidthToDCN);
DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourceScan[k] == dm_horz)
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
mode_lib->vba.SwathWidthY[k] = dml_min(
(double) mode_lib->vba.SwathWidthSingleDPPY[k],
dml_round(
mode_lib->vba.HActive[k] / 2.0
* mode_lib->vba.HRatio[k]));
else {
if (mode_lib->vba.DPPPerPlane[k] == 0) {
mode_lib->vba.SwathWidthY[k] = 0;
} else {
mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
/ mode_lib->vba.DPPPerPlane[k];
}
}
}
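// Bytes per pixel stored in the DET for the luma and chroma planes, by source format. The
// 4:2:0 formats carry a separate chroma value (Cb and Cr together per chroma sample), and the
// fractional 4:2:0 10-bit values (4/3 luma, 8/3 chroma) appear to reflect how the samples are
// packed in memory.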
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
mode_lib->vba.BytePerPixelDETY[k] = 8;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
mode_lib->vba.BytePerPixelDETY[k] = 4;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
mode_lib->vba.BytePerPixelDETY[k] = 2;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
mode_lib->vba.BytePerPixelDETY[k] = 1;
mode_lib->vba.BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
mode_lib->vba.BytePerPixelDETY[k] = 1;
mode_lib->vba.BytePerPixelDETC[k] = 2;
} else { // dm_420_10
mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
}
}
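// Per-plane average read bandwidth: swath width * bytes per pixel / line time * VRatio.
// Chroma uses half the width and half the vertical ratio to account for 4:2:0 subsampling.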
mode_lib->vba.TotalDataReadBandwidth = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k];
mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
/ 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k] / 2;
DTRACE(
" read_bw[%i] = %fBps",
k,
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]);
mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k];
}
mode_lib->vba.TotalDCCActiveDPP = 0;
mode_lib->vba.TotalActiveDPP = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
}
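// Urgent round trip and out-of-order latency: the request round trip (plus a fixed 32-cycle
// allowance) in DCFCLK cycles, plus the out-of-order return allowance per memory channel
// spread over the return bandwidth. UrgentExtraLatency further down adds the pixel and meta
// chunk transfers (and PTE groups when GPUVM is enabled) over the same return bandwidth.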
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
(mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly
* mode_lib->vba.NumberOfChannels
/ mode_lib->vba.ReturnBW;
mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.VRatio[k] <= 1.0)
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
(double) mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
else
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
(double) mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
if (mode_lib->vba.BytePerPixelDETC[k] == 0)
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
mode_lib->vba.SwathWidthY[k] / 2.0
* mode_lib->vba.DPPPerPlane[k]
/ (mode_lib->vba.HRatio[k] / 2.0)
/ mode_lib->vba.PixelClock[k];
else
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
mode_lib->vba.SwathWidthY[k] / 2.0
/ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
/ mode_lib->vba.DPPCLK[k];
}
mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ mode_lib->vba.TotalDCCActiveDPP
* mode_lib->vba.MetaChunkSize) * 1024.0
/ mode_lib->vba.ReturnBW;
if (mode_lib->vba.GPUVMEnable)
mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
* mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBW;
mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatencyPixelDataOnly
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency;
DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k])
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k];
}
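// Writeback urgent watermark: just the writeback latency when at most one writeback is
// active; with more than one, add the time to drain a writeback chunk at SOCCLK through what
// looks like a 32 byte per clock path.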
if (mode_lib->vba.TotalActiveWriteback <= 1)
mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
else
mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ mode_lib->vba.WritebackChunkSize * 1024.0 / 32
/ mode_lib->vba.SOCCLK;
DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
// NB P-State/DRAM Clock Change Watermark
mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.UrgentWatermark;
DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
DTRACE(" calculating wb pstate watermark");
DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
if (mode_lib->vba.TotalActiveWriteback <= 1)
mode_lib->vba.WritebackDRAMClockChangeWatermark =
mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.WritebackLatency;
else
mode_lib->vba.WritebackDRAMClockChangeWatermark =
mode_lib->vba.DRAMClockChangeLatency
+ mode_lib->vba.WritebackLatency
+ mode_lib->vba.WritebackChunkSize * 1024.0 / 32
/ mode_lib->vba.SOCCLK;
DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
// Stutter Efficiency
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
/ mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
mode_lib->vba.LinesInDETY[k],
mode_lib->vba.SwathHeightY[k]);
mode_lib->vba.FullDETBufferingTimeY[k] =
mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k];
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
/ mode_lib->vba.BytePerPixelDETC[k]
/ (mode_lib->vba.SwathWidthY[k] / 2);
mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
mode_lib->vba.LinesInDETC[k],
mode_lib->vba.SwathHeightC[k]);
mode_lib->vba.FullDETBufferingTimeC[k] =
mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ (mode_lib->vba.VRatio[k] / 2);
} else {
mode_lib->vba.LinesInDETC[k] = 0;
mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
}
}
mode_lib->vba.MinFullDETBufferingTime = 999999.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.FullDETBufferingTimeY[k]
< mode_lib->vba.MinFullDETBufferingTime) {
mode_lib->vba.MinFullDETBufferingTime =
mode_lib->vba.FullDETBufferingTimeY[k];
mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
(double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
}
if (mode_lib->vba.FullDETBufferingTimeC[k]
< mode_lib->vba.MinFullDETBufferingTime) {
mode_lib->vba.MinFullDETBufferingTime =
mode_lib->vba.FullDETBufferingTimeC[k];
mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
(double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
}
}
mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DCCEnable[k]) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ mode_lib->vba.DCCRate[k]
/ 1000
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ mode_lib->vba.DCCRate[k]
/ 1000;
} else {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000;
}
if (mode_lib->vba.DCCEnable[k]) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000 / 256
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000 / 256;
}
if (mode_lib->vba.GPUVMEnable) {
mode_lib->vba.AverageReadBandwidthGBytePerSecond =
mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
/ 1000 / 512
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
/ 1000 / 512;
}
}
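// Stutter burst time: the part of the burst that fits in the ROB is fetched at the adjusted
// return bandwidth (scaled by the ratio of average post-DCC to ideal read bandwidth), the
// remainder at 64 bytes per DCFCLK cycle. Stutter efficiency (excluding vblank) is then the
// share of the shortest DET buffering interval not spent on self-refresh exit plus that
// burst, and is forced to zero while any writeback is active.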
mode_lib->vba.PartOfBurstThatFitsInROB =
dml_min(
mode_lib->vba.MinFullDETBufferingTime
* mode_lib->vba.TotalDataReadBandwidth,
mode_lib->vba.ROBBufferSizeInKByte * 1024
* mode_lib->vba.TotalDataReadBandwidth
/ (mode_lib->vba.AverageReadBandwidthGBytePerSecond
* 1000));
mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
* (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
/ mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ (mode_lib->vba.MinFullDETBufferingTime
* mode_lib->vba.TotalDataReadBandwidth
- mode_lib->vba.PartOfBurstThatFitsInROB)
/ (mode_lib->vba.DCFCLK * 64);
if (mode_lib->vba.TotalActiveWriteback == 0) {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
- (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
/ mode_lib->vba.MinFullDETBufferingTime) * 100;
} else {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
}
mode_lib->vba.SmallestVBlank = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.VBlankTime = 0;
}
mode_lib->vba.SmallestVBlank = dml_min(
mode_lib->vba.SmallestVBlank,
mode_lib->vba.VBlankTime);
}
mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
* (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
- mode_lib->vba.SmallestVBlank)
+ mode_lib->vba.SmallestVBlank)
/ mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
// DCFCLK Deep Sleep
mode_lib->vba.DCFCLKDeepSleep = 8.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] =
dml_max(
1.1 * mode_lib->vba.SwathWidthY[k]
* dml_ceil(
mode_lib->vba.BytePerPixelDETY[k],
1) / 32
/ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
* dml_ceil(
mode_lib->vba.BytePerPixelDETC[k],
2) / 32
/ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
} else
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * mode_lib->vba.SwathWidthY[k]
* dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
/ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
mode_lib->vba.DCFCLKDeepSleepPerPlane[k],
mode_lib->vba.PixelClock[k] / 16.0);
mode_lib->vba.DCFCLKDeepSleep = dml_max(
mode_lib->vba.DCFCLKDeepSleep,
mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
DTRACE(
" dcfclk_deepsleep_per_plane[%i] = %fMHz",
k,
mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
}
DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFCLKDeepSleep);
// Stutter Watermark
mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFCLKDeepSleep;
mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ mode_lib->vba.LastPixelOfLineExtraWatermark
+ mode_lib->vba.UrgentExtraLatency;
DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
// Urgent Latency Supported
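// Supported urgent latency per plane: roughly the DET plus line-buffer lines available for
// latency hiding (capped by what the per-pipe share of the return bandwidth can refill),
// expressed as source time (line time / VRatio), minus the time to drain those same lines
// over that per-pipe share of the return bandwidth.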
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.EffectiveDETPlusLBLinesLuma =
dml_floor(
mode_lib->vba.LinesInDETY[k]
+ dml_min(
mode_lib->vba.LinesInDETY[k]
* mode_lib->vba.DPPCLK[k]
* mode_lib->vba.BytePerPixelDETY[k]
* mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]),
(double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
mode_lib->vba.SwathHeightY[k]);
mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k]
- mode_lib->vba.EffectiveDETPlusLBLinesLuma
* mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.BytePerPixelDETY[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]);
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
mode_lib->vba.EffectiveDETPlusLBLinesChroma =
dml_floor(
mode_lib->vba.LinesInDETC[k]
+ dml_min(
mode_lib->vba.LinesInDETC[k]
* mode_lib->vba.DPPCLK[k]
* mode_lib->vba.BytePerPixelDETC[k]
* mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]),
(double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
mode_lib->vba.SwathHeightC[k]);
mode_lib->vba.UrgentLatencySupportUsChroma =
mode_lib->vba.EffectiveDETPlusLBLinesChroma
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ (mode_lib->vba.VRatio[k] / 2)
- mode_lib->vba.EffectiveDETPlusLBLinesChroma
* (mode_lib->vba.SwathWidthY[k]
/ 2)
* mode_lib->vba.BytePerPixelDETC[k]
/ (mode_lib->vba.ReturnBW
/ mode_lib->vba.DPPPerPlane[k]);
mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
mode_lib->vba.UrgentLatencySupportUsLuma,
mode_lib->vba.UrgentLatencySupportUsChroma);
} else {
mode_lib->vba.UrgentLatencySupportUs[k] =
mode_lib->vba.UrgentLatencySupportUsLuma;
}
}
mode_lib->vba.MinUrgentLatencySupportUs = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
mode_lib->vba.MinUrgentLatencySupportUs,
mode_lib->vba.UrgentLatencySupportUs[k]);
}
// Non-Urgent Latency Tolerance
mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
- mode_lib->vba.UrgentWatermark;
// DSCCLK
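// DSCCLK per stream: back-end pixel clock divided by 3 (presumably because the DSC engine
// processes multiple pixels per DSCCLK cycle), divided by 2 more for 4:2:0 or native 4:2:2
// output, with an extra factor of 2 when ODM combine splits the stream, then bumped up to
// compensate for clock down-spreading.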
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
mode_lib->vba.DSCCLK_calculated[k] = 0.0;
} else {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k] == dm_n422)
mode_lib->vba.DSCFormatFactor = 2;
else
mode_lib->vba.DSCFormatFactor = 1;
if (mode_lib->vba.ODMCombineEnabled[k])
mode_lib->vba.DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 6
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
else
mode_lib->vba.DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 3
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
}
}
// DSC Delay
// TODO
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double bpp = mode_lib->vba.OutputBpp[k];
unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
if (!mode_lib->vba.ODMCombineEnabled[k]) {
mode_lib->vba.DSCDelay[k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
mode_lib->vba.DSCDelay[k] =
2
* (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices / 2.0,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]));
}
mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.PixelClockBackEnd[k];
} else {
mode_lib->vba.DSCDelay[k] = 0;
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.DSCEnabled[j])
mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
// Prefetch
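// Per plane: derive the 256B request block sizes, the PDE/meta PTE and PTE bytes per row for
// luma (and for chroma on 4:2:0 formats), the number of source lines to prefetch, and the
// steady-state meta/PTE row bandwidth used later in the prefetch schedule.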
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PixelPTEBytesPerRowY;
unsigned int MetaRowByteY;
unsigned int MetaRowByteC;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int PixelPTEBytesPerRowC;
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
&mode_lib->vba.BlockHeight256BytesY[k],
&mode_lib->vba.BlockHeight256BytesC[k],
&mode_lib->vba.BlockWidth256BytesY[k],
&mode_lib->vba.BlockWidth256BytesC[k]);
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.BlockHeight256BytesY[k],
mode_lib->vba.BlockWidth256BytesY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
mode_lib->vba.SwathWidthY[k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&mode_lib->vba.MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.ViewportYStartY[k],
&mode_lib->vba.VInitPreFillY[k],
&mode_lib->vba.MaxNumSwathY[k]);
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
PDEAndMetaPTEBytesFrameC =
CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.BlockHeight256BytesC[k],
mode_lib->vba.BlockWidth256BytesC[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(
mode_lib->vba.BytePerPixelDETC[k],
2),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2,
mode_lib->vba.ViewportHeight[k] / 2,
mode_lib->vba.SwathWidthY[k] / 2,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchC[k],
0,
&mode_lib->vba.MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.ViewportYStartC[k],
&mode_lib->vba.VInitPreFillC[k],
&mode_lib->vba.MaxNumSwathC[k]);
} else {
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
mode_lib->vba.MaxNumSwathC[k] = 0;
mode_lib->vba.PrefetchSourceLinesC[k] = 0;
}
mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ PDEAndMetaPTEBytesFrameC;
mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
MetaRowByteY,
MetaRowByteC,
mode_lib->vba.meta_row_height[k],
mode_lib->vba.meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_bw[k],
&mode_lib->vba.dpte_row_bw[k],
&mode_lib->vba.qual_row_bw[k]);
}
mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k])
/ mode_lib->vba.DISPCLK;
} else
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[j] == k
&& mode_lib->vba.WritebackEnable[j] == true) {
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
dml_max(
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[j],
mode_lib->vba.WritebackHRatio[j],
mode_lib->vba.WritebackVRatio[j],
mode_lib->vba.WritebackLumaHTaps[j],
mode_lib->vba.WritebackLumaVTaps[j],
mode_lib->vba.WritebackChromaHTaps[j],
mode_lib->vba.WritebackChromaVTaps[j],
mode_lib->vba.WritebackDestinationWidth[j])
/ mode_lib->vba.DISPCLK);
}
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j)
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
mode_lib->vba.VStartupLines = 13;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.MaxVStartupLines[k] =
mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(
1.0,
dml_ceil(
mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1));
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
mode_lib->vba.MaximumMaxVStartupLines = dml_max(
mode_lib->vba.MaximumMaxVStartupLines,
mode_lib->vba.MaxVStartupLines[k]);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.cursor_bw[k] = 0.0;
for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
* mode_lib->vba.CursorBPP[k][j] / 8.0
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k];
}
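// Prefetch schedule iteration: starting from VStartupLines = 13, compute the prefetch
// schedule for every plane, then check the total read bandwidth (prefetch, cursor, meta and
// PTE rows included) against ReturnBW and, when immediate flip is requested, whether flips
// can also be sustained. If either check fails, bump VStartupLines by one and retry until it
// passes or exceeds the largest per-plane maximum.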
do {
double MaxTotalRDBandwidth = 0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
bool prefetch_vm_bw_valid = true;
bool prefetch_row_bw_valid = true;
double TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.SwathWidthY[k],
dml_ceil(
mode_lib->vba.BytePerPixelDETY[k],
1),
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
}
CalculateDelayAfterScaler(
mode_lib,
mode_lib->vba.ReturnBW,
mode_lib->vba.ReadBandwidthPlaneLuma[k],
mode_lib->vba.ReadBandwidthPlaneChroma[k],
mode_lib->vba.TotalDataReadBandwidth,
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
mode_lib->vba.DPPCLK[k],
mode_lib->vba.DISPCLK,
mode_lib->vba.PixelClock[k],
mode_lib->vba.DSCDelay[k],
mode_lib->vba.DPPPerPlane[k],
mode_lib->vba.ScalerEnabled[k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal,
mode_lib->vba.DPPCLKDelaySCL,
mode_lib->vba.DPPCLKDelaySCLLBOnly,
mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelayCNVCCursor,
mode_lib->vba.DISPCLKDelaySubtotal,
mode_lib->vba.SwathWidthY[k] / mode_lib->vba.HRatio[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.SwathWidthSingleDPPY[k],
mode_lib->vba.BytePerPixelDETY[k],
mode_lib->vba.BytePerPixelDETC[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
&mode_lib->vba.DSTXAfterScaler[k],
&mode_lib->vba.DSTYAfterScaler[k]);
mode_lib->vba.ErrorResult[k] =
CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.DPPCLK[k],
mode_lib->vba.DISPCLK,
mode_lib->vba.PixelClock[k],
mode_lib->vba.DCFCLKDeepSleep,
mode_lib->vba.DPPPerPlane[k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[k]),
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.TCalc,
mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
mode_lib->vba.MetaRowByte[k],
mode_lib->vba.PixelPTEBytesPerRow[k],
mode_lib->vba.PrefetchSourceLinesY[k],
mode_lib->vba.SwathWidthY[k],
mode_lib->vba.BytePerPixelDETY[k],
mode_lib->vba.VInitPreFillY[k],
mode_lib->vba.MaxNumSwathY[k],
mode_lib->vba.PrefetchSourceLinesC[k],
mode_lib->vba.BytePerPixelDETC[k],
mode_lib->vba.VInitPreFillC[k],
mode_lib->vba.MaxNumSwathC[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.DSTXAfterScaler[k],
mode_lib->vba.DSTYAfterScaler[k],
&mode_lib->vba.DestinationLinesForPrefetch[k],
&mode_lib->vba.PrefetchBandwidth[k],
&mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
&mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
&mode_lib->vba.VRatioPrefetchY[k],
&mode_lib->vba.VRatioPrefetchC[k],
&mode_lib->vba.RequiredPrefetchPixDataBWLuma[k],
&mode_lib->vba.Tno_bw[k],
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.VStartup[k] = dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[k]);
if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
!= 0) {
mode_lib->vba.VStartup[k] =
mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
}
} else {
mode_lib->vba.VStartup[k] =
dml_min(
mode_lib->vba.VStartupLines,
mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
mode_lib->vba.prefetch_vm_bw[k] = 0;
else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
mode_lib->vba.prefetch_vm_bw[k] =
(double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
/ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
} else {
mode_lib->vba.prefetch_vm_bw[k] = 0;
prefetch_vm_bw_valid = false;
}
if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
== 0)
mode_lib->vba.prefetch_row_bw[k] = 0;
else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
mode_lib->vba.prefetch_row_bw[k] =
(double) (mode_lib->vba.MetaRowByte[k]
+ mode_lib->vba.PixelPTEBytesPerRow[k])
/ (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
} else {
mode_lib->vba.prefetch_row_bw[k] = 0;
prefetch_row_bw_valid = false;
}
MaxTotalRDBandwidth =
MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ dml_max(
mode_lib->vba.prefetch_vm_bw[k],
dml_max(
mode_lib->vba.prefetch_row_bw[k],
dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k],
mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])
+ mode_lib->vba.meta_row_bw[k]
+ mode_lib->vba.dpte_row_bw[k]));
if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (mode_lib->vba.VRatioPrefetchY[k] > 4
|| mode_lib->vba.VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
}
if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
&& prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
&& !DestinationLineTimesForPrefetchLessThan2)
mode_lib->vba.PrefetchModeSupported = true;
else {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
}
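// Immediate flip check: with a feasible prefetch schedule, see how much of ReturnBW remains
// after active reads and prefetch, size the flip PTE/meta bytes per plane, let
// CalculateFlipSchedule spread them over the available bandwidth, and mark immediate flip
// unsupported if the resulting total read bandwidth exceeds ReturnBW or any pipe reports it
// cannot support the flip.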
if (mode_lib->vba.PrefetchModeSupported == true) {
double final_flip_bw[DC__NUM_DPP__MAX];
unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
double total_dcn_read_bw_with_flip = 0;
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
- mode_lib->vba.cursor_bw[k]
- dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ mode_lib->vba.qual_row_bw[k],
mode_lib->vba.PrefetchBandwidth[k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
ImmediateFlipBytes[k] = 0;
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
ImmediateFlipBytes[k] =
mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ mode_lib->vba.MetaRowByte[k]
+ mode_lib->vba.PixelPTEBytesPerRow[k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ ImmediateFlipBytes[k];
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
ImmediateFlipBytes[k],
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
mode_lib->vba.MetaRowByte[k],
mode_lib->vba.PixelPTEBytesPerRow[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
mode_lib->vba.qual_row_bw[k],
&mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
&mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
&final_flip_bw[k],
&mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
total_dcn_read_bw_with_flip =
total_dcn_read_bw_with_flip
+ mode_lib->vba.cursor_bw[k]
+ dml_max(
mode_lib->vba.prefetch_vm_bw[k],
dml_max(
mode_lib->vba.prefetch_row_bw[k],
final_flip_bw[k]
+ dml_max(
mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ mode_lib->vba.ReadBandwidthPlaneChroma[k],
mode_lib->vba.RequiredPrefetchPixDataBWLuma[k])));
}
mode_lib->vba.ImmediateFlipSupported = true;
if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->vba.ImmediateFlipSupported = false;
}
}
} else {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ErrorResult[k]) {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
}
}
mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
} while (!((mode_lib->vba.PrefetchModeSupported
&& (!mode_lib->vba.ImmediateFlipSupport
|| mode_lib->vba.ImmediateFlipSupported))
|| mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
// Display Pipeline Delivery Time in Prefetch
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
}
if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
* mode_lib->vba.DPPPerPlane[k]
/ mode_lib->vba.HRatio[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
mode_lib->vba.SwathWidthY[k]
/ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
/ mode_lib->vba.DPPCLK[k];
}
}
}
// Min TTUVBlank
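// Prefetch mode 0 allows both DRAM clock change and self refresh during vblank and uses the
// largest of the three watermarks; mode 1 allows only self refresh; any other mode allows
// neither and uses just the urgent watermark. TCalc is added when the plane has no dynamic
// metadata to send.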
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
mode_lib->vba.MinTTUVBlank[k] = dml_max(
mode_lib->vba.DRAMClockChangeWatermark,
dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark));
} else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
mode_lib->vba.MinTTUVBlank[k] = dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark);
} else {
mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
}
if (!mode_lib->vba.DynamicMetadataEnable[k])
mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ mode_lib->vba.MinTTUVBlank[k];
}
// DCC Configuration
mode_lib->vba.ActiveDPPs = 0;
// NB P-State/DRAM Clock Change Support
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
}
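// Active DRAM clock change margin per plane: the latency hiding provided by the line buffer,
// the DPP/OPP output buffers and the data already buffered in the DET, minus the DRAM clock
// change watermark; reduced when several DPPs compete, and further limited by the writeback
// buffer drain time when writeback is enabled on the plane.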
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double DPPOutputBufferLinesY;
double DPPOutputBufferLinesC;
double DPPOPPBufferingY;
double MaxDETBufferingTimeY;
double ActiveDRAMClockChangeLatencyMarginY;
mode_lib->vba.LBLatencyHidingSourceLinesY =
dml_min(
mode_lib->vba.MaxLineBufferLines,
(unsigned int) dml_floor(
(double) mode_lib->vba.LineBufferSize
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.SwathWidthY[k]
/ dml_max(
mode_lib->vba.HRatio[k],
1.0)),
1)) - (mode_lib->vba.vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC =
dml_min(
mode_lib->vba.MaxLineBufferLines,
(unsigned int) dml_floor(
(double) mode_lib->vba.LineBufferSize
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.SwathWidthY[k]
/ 2.0
/ dml_max(
mode_lib->vba.HRatio[k]
/ 2,
1.0)),
1))
- (mode_lib->vba.VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
/ mode_lib->vba.VRatio[k]
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
/ (mode_lib->vba.VRatio[k] / 2)
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
/ mode_lib->vba.SwathWidthY[k];
} else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesY = 0.5;
} else {
DPPOutputBufferLinesY = 1;
}
if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
/ (mode_lib->vba.SwathWidthY[k] / 2);
} else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
DPPOutputBufferLinesC = 0.5;
} else {
DPPOutputBufferLinesC = 1;
}
DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ (mode_lib->vba.LinesInDETY[k]
- mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
/ mode_lib->vba.SwathHeightY[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
if (mode_lib->vba.ActiveDPPs > 1) {
ActiveDRAMClockChangeLatencyMarginY =
ActiveDRAMClockChangeLatencyMarginY
- (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
* mode_lib->vba.SwathHeightY[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
}
if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
* (DPPOutputBufferLinesC
+ mode_lib->vba.OPPOutputBufferLines);
double MaxDETBufferingTimeC =
mode_lib->vba.FullDETBufferingTimeC[k]
+ (mode_lib->vba.LinesInDETC[k]
- mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
/ mode_lib->vba.SwathHeightC[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
- mode_lib->vba.DRAMClockChangeWatermark;
if (mode_lib->vba.ActiveDPPs > 1) {
ActiveDRAMClockChangeLatencyMarginC =
ActiveDRAMClockChangeLatencyMarginC
- (1
- 1
/ (mode_lib->vba.ActiveDPPs
- 1))
* mode_lib->vba.SwathHeightC[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]);
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
ActiveDRAMClockChangeLatencyMarginY,
ActiveDRAMClockChangeLatencyMarginC);
} else {
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
ActiveDRAMClockChangeLatencyMarginY;
}
if (mode_lib->vba.WritebackEnable[k]) {
double WritebackDRAMClockChangeLatencyMargin;
if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
WritebackDRAMClockChangeLatencyMargin =
(double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
* 4)
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
} else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
WritebackDRAMClockChangeLatencyMargin =
dml_min(
(double) mode_lib->vba.WritebackInterfaceLumaBufferSize
* 8.0 / 10,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize
* 8 / 10)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]))
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
} else {
WritebackDRAMClockChangeLatencyMargin =
dml_min(
(double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ (mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]))
- mode_lib->vba.WritebackDRAMClockChangeWatermark;
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
WritebackDRAMClockChangeLatencyMargin);
}
}
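// Decide DRAM clock change support: take the smallest active-plane margin (and the second
// smallest among the other displays). A margin above 60 allows switching during vactive (with
// the watermark padded by 25), as does any positive margin when the dummy p-state check is
// enabled; otherwise fall back to vblank switching, which needs synchronized vblanks, a
// single display, or the one-display-in-vblank override, plus prefetch mode 0 and every plane
// allowing the change during vblank.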
{
float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
int PlaneWithMinActiveDRAMClockChangeMargin = -1;
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (mode_lib->vba.BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
&& !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if ((mode_lib->vba.SynchronizedVBlank
|| mode_lib->vba.NumberOfActivePlanes == 1
|| (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
&& mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
mode_lib->vba.DRAMClockChangeSupport[0][0] =
dm_dram_clock_change_unsupported;
}
}
} else {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
}
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
// XFC Parameters:
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.XFCEnabled[k] == true) {
double TWait;
mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.SREnterPlusExitTime);
mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.SwathWidthY[k],
dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.XFCTransferDelay[k] =
dml_ceil(
mode_lib->vba.XFCBusTransportTime
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.XFCPrechargeDelay[k] =
dml_ceil(
(mode_lib->vba.XFCBusTransportTime
+ mode_lib->vba.TInitXFill
+ mode_lib->vba.TslvChk)
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
* mode_lib->vba.SrcActiveDrainRate;
mode_lib->vba.FinalFillMargin =
(mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.XFCFillConstant;
mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.FinalFillMargin;
mode_lib->vba.RemainingFillLevel = dml_max(
0.0,
mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
/ (mode_lib->vba.SrcActiveDrainRate
* mode_lib->vba.XFCFillBWOverhead / 100);
mode_lib->vba.XFCPrefetchMargin[k] =
mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ mode_lib->vba.TFinalxFill
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
mode_lib->vba.XFCPrechargeDelay[k] = 0;
mode_lib->vba.XFCTransferDelay[k] = 0;
mode_lib->vba.XFCPrefetchMargin[k] = 0;
}
}
{
unsigned int VStartupMargin = 0;
bool FirstMainPlane = true;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
unsigned int Margin = (mode_lib->vba.MaxVStartupLines[k] - mode_lib->vba.VStartup[k])
* mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k];
if (FirstMainPlane) {
VStartupMargin = Margin;
FirstMainPlane = false;
} else
VStartupMargin = dml_min(VStartupMargin, Margin);
}
if (mode_lib->vba.UseMaximumVStartup) {
if (mode_lib->vba.VTotal_Max[k] == mode_lib->vba.VTotal[k]) {
// Only use the maximum vstartup when VTotal is not stretched (i.e. not a DRR or late-flip case).
mode_lib->vba.VStartup[k] = mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]];
}
}
}
}
}
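// Derive the per-plane swath heights and the luma/chroma split of the DET buffer from the
// source format, surface tiling and scan direction: keep the maximum (request-block sized)
// swath height when one maximum luma-plus-chroma swath fits in half the DET buffer,
// presumably to leave room for double buffering, and drop to the minimum swath height
// otherwise.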
static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
double BytePerPixDETY;
double BytePerPixDETC;
double Read256BytesBlockHeightY;
double Read256BytesBlockHeightC;
double Read256BytesBlockWidthY;
double Read256BytesBlockWidthC;
double MaximumSwathHeightY;
double MaximumSwathHeightC;
double MinimumSwathHeightY;
double MinimumSwathHeightC;
double SwathWidth;
double SwathWidthGranularityY;
double SwathWidthGranularityC;
double RoundedUpMaxSwathSizeBytesY;
double RoundedUpMaxSwathSizeBytesC;
unsigned int j, k;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
BytePerPixDETY = 8;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
BytePerPixDETY = 4;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
BytePerPixDETY = 2;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 2;
} else {
BytePerPixDETY = 4.0 / 3.0;
BytePerPixDETC = 8.0 / 3.0;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
Read256BytesBlockHeightY = 4;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
Read256BytesBlockHeightY = 8;
} else {
Read256BytesBlockHeightY = 16;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockHeightC = 0;
Read256BytesBlockWidthC = 0;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
Read256BytesBlockHeightC = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
Read256BytesBlockHeightY = 16;
Read256BytesBlockHeightC = 8;
} else {
Read256BytesBlockHeightY = 8;
Read256BytesBlockHeightC = 8;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
/ Read256BytesBlockHeightC;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
MaximumSwathHeightY = Read256BytesBlockHeightY;
MaximumSwathHeightC = Read256BytesBlockHeightC;
} else {
MaximumSwathHeightY = Read256BytesBlockWidthY;
MaximumSwathHeightC = Read256BytesBlockWidthC;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
&& mode_lib->vba.SourceScan[k] != dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
}
MinimumSwathHeightC = MaximumSwathHeightC;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
}
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
SwathWidth = mode_lib->vba.ViewportWidth[k];
} else {
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
if (MainPlaneDoesODMCombine == true) {
SwathWidth = dml_min(
SwathWidth,
mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
} else {
if (mode_lib->vba.DPPPerPlane[k] == 0)
SwathWidth = 0;
else
SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
}
SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
RoundedUpMaxSwathSizeBytesY = (dml_ceil(
(double) (SwathWidth - 1),
SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
* MaximumSwathHeightY;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ 256;
}
if (MaximumSwathHeightC > 0) {
SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
/ MaximumSwathHeightC;
RoundedUpMaxSwathSizeBytesC = (dml_ceil(
(double) (SwathWidth / 2.0 - 1),
SwathWidthGranularityC) + SwathWidthGranularityC)
* BytePerPixDETC * MaximumSwathHeightC;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(
RoundedUpMaxSwathSizeBytesC,
256) + 256;
}
} else
RoundedUpMaxSwathSizeBytesC = 0.0;
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
<= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) {
mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
} else {
mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
}
if (mode_lib->vba.SwathHeightC[k] == 0) {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024;
mode_lib->vba.DETBufferSizeC[k] = 0;
} else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 2;
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 2;
} else {
mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 * 2 / 3;
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0]
* 1024.0 / 3;
}
}
}
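// TWait is the extra wait the prefetch schedule budgets for: prefetch mode 0 covers the worst
// of DRAM clock change plus urgent latency and self-refresh enter/exit, mode 1 the worst of
// self-refresh enter/exit and urgent latency, and any other mode just the urgent latency.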
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatencyPixelDataOnly,
double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(
DRAMClockChangeLatency + UrgentLatencyPixelDataOnly,
dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly);
} else {
return UrgentLatencyPixelDataOnly;
}
}
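// Remote surface flip delay for XFC: the slave setup window plus twice the bus transport
// time, the calculation time, the wait time, the slave chunk check time and the initial
// buffer fill time, the last two derived from the source drain rate and fill overhead.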
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk)
{
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
*TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
*TslvChk = XFCSlvChunkSize / AvgfillRate;
dml_print(
"DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
*SrcActiveDrainRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
return result;
}
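/*
 * CalculateWriteBackDelay: worst-case delay through the writeback scaler,
 * derived from the luma tap counts and scale ratios; for formats other than
 * dm_444_32 the chroma taps and half-rate chroma ratios are folded in as well.
 */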
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth)
{
double CalculateWriteBackDelay =
dml_max(
dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
* dml_ceil(
WritebackDestinationWidth
/ 4.0,
1)
+ dml_ceil(1.0 / WritebackVRatio, 1)
* (dml_ceil(
WritebackLumaVTaps
/ 4.0,
1) + 4));
if (WritebackPixelFormat != dm_444_32) {
CalculateWriteBackDelay =
dml_max(
CalculateWriteBackDelay,
dml_max(
dml_ceil(
WritebackChromaHTaps
/ 2.0,
1)
/ (2
* WritebackHRatio),
WritebackChromaVTaps
* dml_ceil(
1
/ (2
* WritebackVRatio),
1)
* dml_ceil(
WritebackDestinationWidth
/ 2.0
/ 2.0,
1)
+ dml_ceil(
1
/ (2
* WritebackVRatio),
1)
* (dml_ceil(
WritebackChromaVTaps
/ 4.0,
1)
+ 4)));
}
return CalculateWriteBackDelay;
}
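/*
 * CalculateActiveRowBandwidth: average bandwidth needed during active to keep
 * fetching DCC meta rows (*meta_row_bw) and GPUVM PTE rows (*dpte_row_bw);
 * 4:2:0 sources add a half-VRatio chroma term.  *qual_row_bw is the sum of
 * the two for 4:2:0 sources and zero otherwise.
 */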
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw,
double *qual_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ VRatio / 2 * MetaRowByteChroma
/ (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatio / 2 * PixelPTEBytesPerRowChroma
/ (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
*qual_row_bw = *meta_row_bw + *dpte_row_bw;
} else {
*qual_row_bw = 0;
}
}
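/*
 * CalculateFlipSchedule: immediate-flip scheduling for one pipe.  4:2:0
 * sources take an early path that reuses the active row bandwidth.  Otherwise
 * the pipe's share of the immediate-flip bandwidth is used to compute the
 * PDE/meta-PTE and meta/PTE row fetch times, which are converted to
 * destination lines in quarter-line steps, the resulting flip bandwidth is
 * derived, and the flip is flagged unsupported when the line requests exceed
 * 8/16 lines or the fetch time does not fit within the minimum row time.
 */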
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double UrgentExtraLatency,
double UrgentLatencyPixelDataOnly,
unsigned int GPUVMMaxPageTableLevels,
bool GPUVMEnable,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
unsigned int ImmediateFlipBytes,
double LineTime,
double VRatio,
double Tno_bw,
double PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
double qual_row_bw,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
double min_row_time = 0.0;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*DestinationLinesToRequestVMInImmediateFlip = 0.0;
*DestinationLinesToRequestRowInImmediateFlip = 0.0;
*final_flip_bw = qual_row_bw;
*ImmediateFlipSupportedForPipe = true;
} else {
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
if (GPUVMEnable == true) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingMetaPTEImmediateFlip =
dml_max(
Tno_bw
+ PDEAndMetaPTEBytesFrame
/ mode_lib->vba.ImmediateFlipBW[0],
dml_max(
UrgentExtraLatency
+ UrgentLatencyPixelDataOnly
* (GPUVMMaxPageTableLevels
- 1),
LineTime / 4.0));
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip = dml_floor(
4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
1) / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingRowInVBlankImmediateFlip = dml_max(
(MetaRowByte + PixelPTEBytesPerRow)
/ mode_lib->vba.ImmediateFlipBW[0],
dml_max(UrgentLatencyPixelDataOnly, LineTime / 4.0));
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestRowInImmediateFlip = dml_floor(
4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
1) / 4.0;
if (GPUVMEnable == true) {
*final_flip_bw =
dml_max(
PDEAndMetaPTEBytesFrame
/ (*DestinationLinesToRequestVMInImmediateFlip
* LineTime),
(MetaRowByte + PixelPTEBytesPerRow)
/ (TimeForFetchingRowInVBlankImmediateFlip
* LineTime));
} else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
*final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
/ (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
} else {
*final_flip_bw = 0;
}
if (GPUVMEnable && !DCCEnable)
min_row_time = dpte_row_height * LineTime / VRatio;
else if (!GPUVMEnable && DCCEnable)
min_row_time = meta_row_height * LineTime / VRatio;
else
min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
/ VRatio;
if (*DestinationLinesToRequestVMInImmediateFlip >= 8
|| *DestinationLinesToRequestRowInImmediateFlip >= 16
|| TimeForFetchingMetaPTEImmediateFlip
+ 2 * TimeForFetchingRowInVBlankImmediateFlip
> min_row_time)
*ImmediateFlipSupportedForPipe = false;
else
*ImmediateFlipSupportedForPipe = true;
}
}
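/*
 * TruncToValidBPP: clamp the bpp the link can carry (DecimalBPP) to the
 * nearest valid output bpp for the given encoder and output format, honouring
 * a forced DesiredBPP when non-zero.  With DSC the result is quantized to
 * 1/16 bpp up to the per-format maximum; without DSC only the discrete
 * HDMI/DP rates are allowed.  Returns BPP_INVALID when the rate cannot be met
 * (note the unsigned return type truncates fractional DSC results).
 */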
static unsigned int TruncToValidBPP(
double DecimalBPP,
double DesiredBPP,
bool DSCEnabled,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent)
{
if (Output == dm_hdmi) {
if (Format == dm_420) {
if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_444) {
if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
} else {
if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
}
} else {
if (DSCEnabled) {
if (Format == dm_420) {
if (DesiredBPP == 0) {
if (DecimalBPP < 6)
return BPP_INVALID;
else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 6
|| DesiredBPP < 6
|| DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
} else if (Format == dm_n422) {
if (DesiredBPP == 0) {
if (DecimalBPP < 7)
return BPP_INVALID;
else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 7
|| DesiredBPP < 7
|| DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
} else {
if (DesiredBPP == 0) {
if (DecimalBPP < 8)
return BPP_INVALID;
else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 8
|| DesiredBPP < 8
|| DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
}
} else if (Format == dm_420) {
if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_s422 || Format == dm_n422) {
if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
} else {
if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
}
}
}
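/*
 * dml20v2_ModeSupportAndSystemConfigurationFull: evaluate every SoC voltage
 * state (and DPP split option) against the active plane configuration,
 * covering scale ratio/taps, source format and tiling, bandwidth, writeback,
 * DISPCLK/DPPCLK, viewport, OTG, DSC and prefetch constraints.
 */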
void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
int i;
unsigned int j, k, m;
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
/*Scale Ratio, taps Support Check*/
mode_lib->vba.ScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.ScalerEnabled[k] == false
&& ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
|| mode_lib->vba.HRatio[k] != 1.0
|| mode_lib->vba.htaps[k] != 1.0
|| mode_lib->vba.VRatio[k] != 1.0
|| mode_lib->vba.vtaps[k] != 1.0)) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
} else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
|| mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
|| (mode_lib->vba.htaps[k] > 1.0
&& (mode_lib->vba.htaps[k] % 2) == 1)
|| mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
|| mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
|| mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
|| mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
|| (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
&& (mode_lib->vba.HRatio[k] / 2.0
> mode_lib->vba.HTAPsChroma[k]
|| mode_lib->vba.VRatio[k] / 2.0
> mode_lib->vba.VTAPsChroma[k]))) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
mode_lib->vba.SourceFormatPixelAndScanSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
&& mode_lib->vba.SourceScan[k] != dm_horz)
|| ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
|| (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
&& (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
== dm_444_32)
&& mode_lib->vba.SourceScan[k]
== dm_horz
&& mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
== true
&& mode_lib->vba.DCCEnable[k]
== false))
|| (mode_lib->vba.DCCEnable[k] == true
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_linear
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10)))) {
mode_lib->vba.SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
locals->BytePerPixelInDETY[k] = 8.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
locals->BytePerPixelInDETY[k] = 4.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
locals->BytePerPixelInDETY[k] = 2.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 2.0;
} else {
locals->BytePerPixelInDETY[k] = 4.0 / 3;
locals->BytePerPixelInDETC[k] = 8.0 / 3;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
} else {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0;
locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 4.0;
} else if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 3.0;
} else if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 1.5;
} else {
locals->WriteBandwidth[k] = 0.0;
}
}
mode_lib->vba.DCCEnabledInAnyPlane = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
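	/*
	 * Ideal return bandwidth per state: min of DRAM and fabric bandwidth,
	 * derated by the post-urgent-latency SDP/fabric efficiency, then reduced
	 * further when DCC is enabled and the return rate outruns what the ROB
	 * can absorb around the urgent latency window.  The DCC adjustment is
	 * applied a second time against the non-derated return rate.
	 */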
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000;
locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i],
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
* locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
}
locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
}
locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth *
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
* locals->ReturnBusWidth / 4) + locals->UrgentLatency)));
}
locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] *
locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2));
}
}
/*Writeback Latency support check*/
mode_lib->vba.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
if (locals->WriteBandwidth[k]
> (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
} else {
if (locals->WriteBandwidth[k]
> 1.5
* dml_min(
mode_lib->vba.WritebackInterfaceLumaBufferSize,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
}
}
}
/*Re-ordering Buffer Support Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
locals->ROBSupport[i][0] = true;
} else {
locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
mode_lib->vba.TotalNumberOfActiveWriteback = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0)
mode_lib->vba.ActiveWritebacksPerPlane[k] = 1;
mode_lib->vba.TotalNumberOfActiveWriteback =
mode_lib->vba.TotalNumberOfActiveWriteback
+ mode_lib->vba.ActiveWritebacksPerPlane[k];
}
}
mode_lib->vba.WritebackModeSupport = true;
if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
mode_lib->vba.WritebackModeSupport = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.Writeback10bpc420Supported != true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
mode_lib->vba.WritebackModeSupport = false;
}
}
/*Writeback Scale Ratio and Taps Support Check*/
mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
&& (mode_lib->vba.WritebackHRatio[k] != 1.0
|| mode_lib->vba.WritebackVRatio[k] != 1.0)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackMaxVSCLRatio
|| mode_lib->vba.WritebackHRatio[k]
< mode_lib->vba.WritebackMinHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
< mode_lib->vba.WritebackMinVSCLRatio
|| mode_lib->vba.WritebackLumaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackLumaHTaps[k]
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackLumaVTaps[k]
|| (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
== 1))
|| (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
&& (mode_lib->vba.WritebackChromaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| 2.0
* mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackChromaHTaps[k]
|| 2.0
* mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackChromaVTaps[k]
|| (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
mode_lib->vba.WritebackLumaVExtra =
dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
} else {
mode_lib->vba.WritebackLumaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
&& mode_lib->vba.WritebackLumaVTaps[k]
> (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ mode_lib->vba.WritebackLineBufferChromaBufferSize)
/ 3.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
mode_lib->vba.WritebackChromaVExtra = 0.0;
} else {
mode_lib->vba.WritebackChromaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackRequiredDISPCLK =
dml_max(
mode_lib->vba.WritebackRequiredDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
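	/*
	 * Minimum DPPCLK each plane needs on a single DPP: limited by the
	 * DCHUB-to-PSCL and PSCL-to-LB throughput (PSCL_FACTOR), the tap counts
	 * and the scale ratios, with a 2x pixel clock floor when more than six
	 * taps are used.
	 */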
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.HRatio[k] > 1.0) {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
if (locals->BytePerPixelInDETC[k] == 0.0) {
locals->PSCL_FACTOR_CHROMA[k] = 0.0;
locals->MinDPPCLKUsingSingleDPP[k] =
mode_lib->vba.PixelClock[k]
* dml_max3(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_FACTOR[k],
1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
} else {
if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
locals->PSCL_FACTOR_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2.0
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
locals->MinDPPCLKUsingSingleDPP[k] =
mode_lib->vba.PixelClock[k]
* dml_max5(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_FACTOR[k],
mode_lib->vba.VTAPsChroma[k]
/ 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]
/ 2.0),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ 4.0
/ locals->PSCL_FACTOR_CHROMA[k],
1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
|| mode_lib->vba.HTAPsChroma[k] > 6.0
|| mode_lib->vba.VTAPsChroma[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
}
}
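	/*
	 * Per-plane request block sizes and swath limits: 256B request block
	 * dimensions, max/min swath heights for the scan direction, and the
	 * maximum swath width the DET and line buffer can hold.
	 */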
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
&locals->Read256BlockHeightY[k],
&locals->Read256BlockHeightC[k],
&locals->Read256BlockWidthY[k],
&locals->Read256BlockWidthC[k]);
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k];
} else {
locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
}
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]
/ 2.0;
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
}
}
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
} else {
mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
}
mode_lib->vba.MaximumSwathWidthInDETBuffer =
dml_min(
mode_lib->vba.MaximumSwathWidthSupport,
mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0
/ (locals->BytePerPixelInDETY[k]
* locals->MinSwathHeightY[k]
+ locals->BytePerPixelInDETC[k]
/ 2.0
* locals->MinSwathHeightC[k]));
if (locals->BytePerPixelInDETC[k] == 0.0) {
mode_lib->vba.MaximumSwathWidthInLineBuffer =
mode_lib->vba.LineBufferSize
* dml_max(mode_lib->vba.HRatio[k], 1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.vtaps[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k],
1.0)
- 2,
0.0));
} else {
mode_lib->vba.MaximumSwathWidthInLineBuffer =
dml_min(
mode_lib->vba.LineBufferSize
* dml_max(
mode_lib->vba.HRatio[k],
1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.vtaps[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k],
1.0)
- 2,
0.0)),
2.0 * mode_lib->vba.LineBufferSize
* dml_max(
mode_lib->vba.HRatio[k]
/ 2.0,
1.0)
/ mode_lib->vba.LBBitPerPixel[k]
/ (mode_lib->vba.VTAPsChroma[k]
+ dml_max(
dml_ceil(
mode_lib->vba.VRatio[k]
/ 2.0,
1.0)
- 2,
0.0)));
}
locals->MaximumSwathWidth[k] = dml_min(
mode_lib->vba.MaximumSwathWidthInDETBuffer,
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
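	/*
	 * Required DISPCLK/DPPCLK and DPP count per state for both split options
	 * (j == 1 additionally splits the highest read-bandwidth planes across
	 * two DPPs while pipes remain), including the ODM combine decision per
	 * plane.
	 */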
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDppclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
|| (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
if (j == 1) {
while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP
&& locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) {
double BWOfNonSplitPlaneOfMaximumBandwidth;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
BWOfNonSplitPlaneOfMaximumBandwidth = 0;
NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) {
BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1;
}
}
if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) {
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
if (i != mode_lib->vba.soc.num_states) {
mode_lib->vba.PlaneRequiredDISPCLK =
mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
} else {
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
|| mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.WritebackRequiredDISPCLK);
if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
< mode_lib->vba.WritebackRequiredDISPCLK) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP)
locals->TotalAvailablePipesSupport[i][j] = true;
else
locals->TotalAvailablePipesSupport[i][j] = false;
}
}
/*Total Available OTG Support Check*/
mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ 1.0;
}
}
if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
mode_lib->vba.NumberOfOTGSupport = true;
} else {
mode_lib->vba.NumberOfOTGSupport = false;
}
/*Display IO and DSC Support Check*/
mode_lib->vba.NonsupportedDSCInputBPC = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiresDSC[i][k] = 0;
locals->RequiresFEC[i][k] = 0;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_hdmi) {
locals->RequiresDSC[i][k] = 0;
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
} else if (mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp) {
if (mode_lib->vba.Output[k] == dm_edp) {
mode_lib->vba.EffectiveFECOverhead = 0.0;
} else {
mode_lib->vba.EffectiveFECOverhead =
mode_lib->vba.FECOverhead;
}
if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID
&& mode_lib->vba.PHYCLKPerState[i]
>= 810.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] =
mode_lib->vba.Outbpp;
}
}
} else {
locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
}
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->DSCCLKRequiredMoreThanSupported[i] = false;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if ((mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp)) {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k]
== dm_n422) {
mode_lib->vba.DSCFormatFactor = 2;
} else {
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
true;
}
} else {
if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
true;
}
}
}
}
}
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->NotEnoughDSCUnits[i] = false;
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 1.0;
}
}
}
if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
locals->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] != k) {
mode_lib->vba.slices = 0;
} else if (locals->RequiresDSC[i][k] == 0
|| locals->RequiresDSC[i][k] == false) {
mode_lib->vba.slices = 0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
mode_lib->vba.slices = dml_ceil(
mode_lib->vba.PixelClockBackEnd[k] / 400.0,
4.0);
} else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
mode_lib->vba.slices = 8.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
mode_lib->vba.slices = 4.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
mode_lib->vba.slices = 2.0;
} else {
mode_lib->vba.slices = 1.0;
}
if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE
|| locals->OutputBppPerState[i][k] == BPP_INVALID) {
mode_lib->vba.bpp = 0.0;
} else {
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(
mode_lib->vba.HActive[k]
/ mode_lib->vba.slices,
1.0),
mode_lib->vba.slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
locals->DSCDelayPerState[i][k] =
2.0 * (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0),
mode_lib->vba.slices / 2,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(mode_lib->vba.OutputFormat[k]));
}
locals->DSCDelayPerState[i][k] =
locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k];
} else {
locals->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true)
locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m];
}
}
}
}
	//Urgent Latency Support Check
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
locals->SwathWidthGranularityY = 256 / dml_ceil(locals->BytePerPixelInDETY[k], 1) / locals->MaxSwathHeightY[k];
locals->RoundedUpMaxSwathSizeBytesY = (dml_ceil(locals->SwathWidthYPerState[i][j][k] - 1, locals->SwathWidthGranularityY)
+ locals->SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k];
if (locals->SourcePixelFormat[k] == dm_420_10) {
locals->RoundedUpMaxSwathSizeBytesY = dml_ceil(locals->RoundedUpMaxSwathSizeBytesY, 256) + 256;
}
					if (locals->MaxSwathHeightC[k] > 0) {
						locals->SwathWidthGranularityC = 256 / dml_ceil(locals->BytePerPixelInDETC[k], 2) / locals->MaxSwathHeightC[k];
						locals->RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYPerState[i][j][k] / 2 - 1, locals->SwathWidthGranularityC)
							+ locals->SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k];
						if (locals->SourcePixelFormat[k] == dm_420_10) {
							locals->RoundedUpMaxSwathSizeBytesC = dml_ceil(locals->RoundedUpMaxSwathSizeBytesC, 256) + 256;
						}
					} else {
						locals->RoundedUpMaxSwathSizeBytesC = 0;
					}
if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) {
locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k];
} else {
locals->SwathHeightYPerState[i][j][k] = locals->MinSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MinSwathHeightC[k];
}
if (locals->BytePerPixelInDETC[k] == 0) {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = 0;
} else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETY[k] /
locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
} else {
locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k];
locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2);
}
locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines,
dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k]
/ dml_max(locals->HRatio[k], 1)), 1)) - (locals->vtaps[k] - 1);
locals->EffectiveLBLatencyHidingSourceLinesChroma = dml_min(locals->MaxLineBufferLines,
dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k]
/ (locals->SwathWidthYPerState[i][j][k] / 2
/ dml_max(locals->HRatio[k] / 2, 1)), 1)) - (locals->VTAPsChroma[k] - 1);
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
}
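	/* A state/split option supports urgent latency only if every plane's
	 * derived tolerance meets the SoC urgent latency. */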
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->UrgentLatencySupport[i][j] = true;
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
if (locals->UrgentLatencySupportUsPerState[i][j][k] < locals->UrgentLatency)
locals->UrgentLatencySupport[i][j] = false;
}
}
}
/*Prefetch Check*/
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
if (locals->DCCEnable[k] == true) {
locals->TotalNumberOfDCCActiveDPP[i][j] =
locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
}
}
}
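	/* Resolve the allowed prefetch mode range and the total active read
	 * bandwidth before the per-state prefetch evaluation. */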
CalculateMinAndMaxPrefetchMode(locals->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &locals->MinPrefetchMode, &locals->MaxPrefetchMode);
locals->MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
locals->MaxTotalVActiveRDBandwidth = locals->MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
}
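	/*
	 * Per-state, per-split prefetch evaluation: capture this configuration's
	 * DPP counts, clocks and swath geometry, then project the deep-sleep
	 * DCFCLK floor from the per-plane request rates.
	 */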
for (i = 0; i <= locals->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k < locals->NumberOfActivePlanes; k++) {
locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 64.0
* mode_lib->vba.HRatio[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 64.0
* mode_lib->vba.PSCL_FACTOR[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 32.0
* mode_lib->vba.HRatio[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
1.0)
/ 32.0
* mode_lib->vba.PSCL_FACTOR[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
2.0)
/ 32.0
* mode_lib->vba.HRatio[k]
/ 2.0
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
2.0)
/ 32.0
* mode_lib->vba.PSCL_FACTOR_CHROMA[k]
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
}
}
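			/*
			 * GPUVM/DCC overhead per plane for this state: PDE + meta PTE
			 * bytes per frame, meta and PTE bytes per row, prefetch source
			 * lines (chroma only for non-444/mono formats), and the
			 * resulting active row bandwidth.
			 */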
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.Read256BlockHeightY[k],
mode_lib->vba.Read256BlockWidthY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&mode_lib->vba.MacroTileWidthY[k],
&mode_lib->vba.MetaRowBytesY,
&mode_lib->vba.DPTEBytesPerRowY,
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightYPerState[i][j][k],
mode_lib->vba.ViewportYStartY[k],
&mode_lib->vba.PrefillY[k],
&mode_lib->vba.MaxNumSwY[k]);
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
mode_lib->vba.Read256BlockHeightY[k],
mode_lib->vba.Read256BlockWidthY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2.0,
mode_lib->vba.ViewportHeight[k] / 2.0,
mode_lib->vba.SwathWidthYPerState[i][j][k] / 2.0,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsLuma,
mode_lib->vba.PDEProcessingBufIn64KBReqs,
mode_lib->vba.PitchC[k],
0.0,
&mode_lib->vba.MacroTileWidthC[k],
&mode_lib->vba.MetaRowBytesC,
&mode_lib->vba.DPTEBytesPerRowC,
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightCPerState[i][j][k],
mode_lib->vba.ViewportYStartC[k],
&mode_lib->vba.PrefillC[k],
&mode_lib->vba.MaxNumSwC[k]);
} else {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.MetaRowBytesY,
mode_lib->vba.MetaRowBytesC,
mode_lib->vba.meta_row_height[k],
mode_lib->vba.meta_row_height_chroma[k],
mode_lib->vba.DPTEBytesPerRowY,
mode_lib->vba.DPTEBytesPerRowC,
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_bw[k],
&mode_lib->vba.dpte_row_bw[k],
&mode_lib->vba.qual_row_bw[k]);
}
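					/*
					 * ExtraLatency below: the urgent round-trip latency for this state
					 * plus the time to return one pixel chunk per active DPP and one
					 * meta chunk per DCC-active DPP at this state's return bandwidth;
					 * with GPUVM, one PTE group per active DPP is added as well.
					 */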
mode_lib->vba.ExtraLatency =
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ (mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PixelChunkSizeInKByte
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
/ mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
/ mode_lib->vba.ReturnBWPerState[i][0];
}
mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j];
} else {
locals->WritebackDelay[i][k] = 0.0;
}
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[m] == k
&& mode_lib->vba.WritebackEnable[m]
== true) {
locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k],
mode_lib->vba.WritebackLatency + CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[m],
mode_lib->vba.WritebackHRatio[m],
mode_lib->vba.WritebackVRatio[m],
mode_lib->vba.WritebackLumaHTaps[m],
mode_lib->vba.WritebackLumaVTaps[m],
mode_lib->vba.WritebackChromaHTaps[m],
mode_lib->vba.WritebackChromaVTaps[m],
mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[k] == m) {
locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m];
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m < locals->NumberOfCursors[k]; m++)
locals->cursor_bw[k] = locals->NumberOfCursors[k] * locals->CursorWidth[k][m] * locals->CursorBPP[k][m]
/ 8 / (locals->HTotal[k] / locals->PixelClock[k]) * locals->VRatio[k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
do {
mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
mode_lib->vba.TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[i][j],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
locals->SwathWidthYPerState[i][j][k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TimeCalc,
mode_lib->vba.TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
}
CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
mode_lib->vba.SwathWidthYPerState[i][j][k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k],
mode_lib->vba.SwathWidthYSingleDPP[k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.SwathHeightYThisState[k], mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
&mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]);
mode_lib->vba.IsErrorResult[i][j][k] =
CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
mode_lib->vba.MetaRowBytes[0][0][k],
mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
mode_lib->vba.SwathHeightYPerState[i][j][k],
mode_lib->vba.SwathHeightCPerState[i][j][k],
mode_lib->vba.TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.DSTXAfterScaler[k],
mode_lib->vba.DSTYAfterScaler[k],
&mode_lib->vba.LineTimesForPrefetch[k],
&mode_lib->vba.PrefetchBW[k],
&mode_lib->vba.LinesForMetaPTE[k],
&mode_lib->vba.LinesForMetaAndDPTERow[k],
&mode_lib->vba.VRatioPreY[i][j][k],
&mode_lib->vba.VRatioPreC[i][j][k],
&mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k],
&mode_lib->vba.Tno_bw[k],
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
}
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
locals->prefetch_row_bw_valid = false;
}
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ mode_lib->vba.cursor_bw[k] + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k];
mode_lib->vba.MaximumReadBandwidthWithPrefetch =
mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ mode_lib->vba.cursor_bw[k]
+ dml_max3(
mode_lib->vba.prefetch_vm_bw[k],
mode_lib->vba.prefetch_row_bw[k],
dml_max(mode_lib->vba.ReadBandwidth[k],
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
locals->BandwidthWithoutPrefetchSupported[i][0] = true;
if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->LineTimesForPrefetch[k] < 2.0
|| locals->LinesForMetaPTE[k] >= 8.0
|| locals->LinesForMetaAndDPTERow[k] >= 16.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->PrefetchSupported[i][j] = false;
}
}
locals->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->VRatioPreY[i][j][k] > 4.0
|| locals->VRatioPreC[i][j][k] > 4.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->VRatioInPrefetchSupported[i][j] = false;
}
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode);
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
- mode_lib->vba.cursor_bw[k]
- dml_max(
mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.qual_row_bw[k],
mode_lib->vba.PrefetchBW[k]);
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ mode_lib->vba.MetaRowBytes[0][0][k]
+ mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.TotImmediateFlipBytes =
mode_lib->vba.TotImmediateFlipBytes
+ mode_lib->vba.ImmediateFlipBytes[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.ExtraLatency,
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.ImmediateFlipBytes[k],
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
mode_lib->vba.MetaRowBytes[0][0][k],
mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
mode_lib->vba.qual_row_bw[k],
&mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
&mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
&mode_lib->vba.final_flip_bw[k],
&mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
}
mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.total_dcn_read_bw_with_flip =
mode_lib->vba.total_dcn_read_bw_with_flip
+ mode_lib->vba.cursor_bw[k]
+ dml_max3(
mode_lib->vba.prefetch_vm_bw[k],
mode_lib->vba.prefetch_row_bw[k],
mode_lib->vba.final_flip_bw[k]
+ dml_max(
mode_lib->vba.ReadBandwidth[k],
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]));
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
> mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
}
}
/*Vertical Active BW support*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->PTEBufferSizeNotExceededY[i][j][k] == false
|| locals->PTEBufferSizeNotExceededC[i][j][k] == false) {
locals->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
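	/*
	 * A cursor fails the check if the cursor lines that fit in the cursor
	 * buffer (minus one chunk) cannot cover the urgent latency at the
	 * plane's VRatio, or if a 64bpp cursor is used without 64bpp support.
	 */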
mode_lib->vba.CursorSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (j = 0; j < 2; j++) {
if (mode_lib->vba.CursorWidth[k][j] > 0.0) {
if (dml_floor(
dml_floor(
mode_lib->vba.CursorBufferSize
- mode_lib->vba.CursorChunkSize,
mode_lib->vba.CursorChunkSize) * 1024.0
/ (mode_lib->vba.CursorWidth[k][j]
* mode_lib->vba.CursorBPP[k][j]
/ 8.0),
1.0)
* (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatencyPixelDataOnly
|| (mode_lib->vba.CursorBPP[k][j] == 64.0
&& mode_lib->vba.Cursor64BppSupport == false)) {
mode_lib->vba.CursorSupport = false;
}
}
}
}
/*Valid Pitch Check*/
mode_lib->vba.PitchSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->AlignedYPitch[k] = dml_ceil(
dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
locals->MacroTileWidthY[k]);
if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.DCCEnable[k] == true) {
locals->AlignedDCCMetaPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.DCCMetaPitchY[k],
mode_lib->vba.ViewportWidth[k]),
64.0 * locals->Read256BlockWidthY[k]);
} else {
locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
}
if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
locals->AlignedCPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.PitchC[k],
mode_lib->vba.ViewportWidth[k] / 2.0),
locals->MacroTileWidthC[k]);
} else {
locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k];
}
if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
mode_lib->vba.PitchSupport = false;
}
}
/*Mode Support, Voltage State and SOC Configuration*/
for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
} else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
} else if (locals->NotEnoughDSCUnits[i] != false) {
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
} else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
} else if (locals->TotalAvailablePipesSupport[i][j] != true) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
} else if (mode_lib->vba.NumberOfOTGSupport != true) {
status = DML_FAIL_NUM_OTG;
} else if (mode_lib->vba.WritebackModeSupport != true) {
status = DML_FAIL_WRITEBACK_MODE;
} else if (mode_lib->vba.WritebackLatencySupport != true) {
status = DML_FAIL_WRITEBACK_LATENCY;
} else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
} else if (mode_lib->vba.CursorSupport != true) {
status = DML_FAIL_CURSOR_SUPPORT;
} else if (mode_lib->vba.PitchSupport != true) {
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
status = DML_FAIL_PTE_BUFFER_SIZE;
} else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
status = DML_FAIL_DSC_INPUT_BPC;
}
if (status == DML_VALIDATION_OK) {
locals->ModeSupport[i][j] = true;
} else {
locals->ModeSupport[i][j] = false;
}
locals->ValidationStatus[i] = status;
}
}
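	/*
	 * Pick the lowest supported voltage state at or above the override level,
	 * and decide whether MPC combine (j = 1) is needed for that state.
	 */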
{
unsigned int MaximumMPCCombine = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1;
for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) {
if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false
|| mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible)) {
MaximumMPCCombine = 1;
} else {
MaximumMPCCombine = 0;
}
break;
}
}
mode_lib->vba.ImmediateFlipSupport =
locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
}
mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.maxMpcComb = MaximumMPCCombine;
}
mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
mode_lib->vba.ODMCombineEnabled[k] = 0;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.OutputBpp[k] =
locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k];
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn10/dcn10_resource.h"
#include "dcn10_fpu.h"
#include "resource.h"
#include "amdgpu_dm/dc_fpu.h"
/**
* DOC: DCN10 FPU manipulation Overview
*
* The DCN architecture relies on FPU operations, which require special
* compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
* want to avoid spreading FPU access across multiple files. With this idea in
* mind, this file aims to centralize DCN10 functions that require FPU access
* in a single place. Code in this file follows the following code pattern:
*
* 1. Functions that use FPU operations should be isolated in static functions.
 * 2. The FPU functions should have the noinline attribute to ensure that
 *    anything that touches FP registers stays contained within the call.
 * 3. Any function that needs to be accessed from outside this file requires a
 *    public interface that does not use any FPU references.
 * 4. Developers **must not** use DC_FP_START/END in this file, but they need
 *    to ensure that the caller invokes it before accessing any function
 *    available in this file. For this reason, public functions in this file
 *    must invoke dc_assert_fp_enabled();
 *
 * Let's expand on the idea behind this code pattern a little more. To fully
 * isolate FPU operations in a single place, we must avoid situations where
 * the compiler spills FP values to registers because FP was enabled in a specific C
* file. Note that even if we isolate all FPU functions in a single file and
* call its interface from other files, the compiler might enable the use of
* FPU before we call DC_FP_START. Nevertheless, it is the programmer's
* responsibility to invoke DC_FP_START/END in the correct place. To highlight
* situations where developers forgot to use the FP protection before calling
* the DC FPU interface functions, we introduce a helper that checks if the
* function is invoked under FP protection. If not, it will trigger a kernel
* warning.
*/
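/*
 * Purely illustrative sketch of the pattern described above; it is not part
 * of the driver and the dcn10_example_*() names are made up. Only
 * dc_assert_fp_enabled(), DC_FP_START()/DC_FP_END() and noinline are real.
 */
#if 0	/* example only, never compiled */
static noinline double dcn10_example_scale_fp(double bw_mbps, double factor)
{
	/* every FP operation stays inside this noinline helper */
	return bw_mbps * factor / 100.0;
}

double dcn10_example_scale(double bw_mbps, double factor)
{
	/* public interface: no FP math here; the caller wraps us in DC_FP_START()/DC_FP_END() */
	dc_assert_fp_enabled();
	return dcn10_example_scale_fp(bw_mbps, factor);
}
#endif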
struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 42,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 589824,
.max_line_buffer_lines = 12,
.IsLineBufferBppFixed = 0,
.LineBufferFixedBpp = -1,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.max_num_dpp = 4,
.max_num_wb = 2,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 14,
.dppclk_delay_subtotal = 90,
.dispclk_delay_subtotal = 42,
.dcfclk_cstate_latency = 10,
.max_inter_dcn_tile_repeaters = 8,
.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
.bug_forcing_LC_req_same_size_fixed = 0,
};
struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.urgent_latency_us = 4.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 80.0,
.max_request_size_bytes = 256,
.downspread_percent = 0.5,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 2,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 17.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
};
void dcn10_resource_construct_fp(struct dc *dc)
{
dc_assert_fp_enabled();
if (dc->ctx->dce_version == DCN_VERSION_1_01) {
struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
struct dcn_ip_params *dcn_ip = dc->dcn_ip;
struct display_mode_lib *dml = &dc->dml;
dml->ip.max_num_dpp = 3;
/* TODO how to handle 23.84? */
dcn_soc->dram_clock_change_latency = 23;
dcn_ip->max_num_dpp = 3;
}
if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
dc->dcn_soc->urgent_latency = 3;
dc->debug.disable_dmcu = true;
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
}
dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
ASSERT(dc->dcn_soc->number_of_channels < 3);
if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
dc->dcn_soc->number_of_channels = 2;
if (dc->dcn_soc->number_of_channels == 1) {
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev))
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_21.h"
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
 * ways. Unless there is something clearly wrong with it, the code should
 * remain as-is, as it provides us with a guarantee from HW that it is correct.
*/
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp);
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
if (source_format == dm_444_16) {
if (!is_chroma)
ret_val = 2;
} else if (source_format == dm_444_32) {
if (!is_chroma)
ret_val = 4;
} else if (source_format == dm_444_64) {
if (!is_chroma)
ret_val = 8;
} else if (source_format == dm_420_8) {
if (is_chroma)
ret_val = 2;
else
ret_val = 1;
} else if (source_format == dm_420_10) {
if (is_chroma)
ret_val = 4;
else
ret_val = 2;
} else if (source_format == dm_444_8) {
ret_val = 1;
}
return ret_val;
}
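// Quick reference for the mapping above (bytes per element, luma/chroma):
// dm_444_16: 2/0, dm_444_32: 4/0, dm_444_64: 8/0, dm_420_8: 1/2,
// dm_420_10: 2/4, dm_444_8: 1 for either plane; anything else returns 0.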
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
ret_val = true;
return ret_val;
}
static double get_refcyc_per_delivery(
struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
bool odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double) refclk_freq_in_mhz
* dml_min((double) recout_width, (double) hactive / 2.0)
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
else
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
/ pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
/ (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
return refcyc_per_delivery;
}
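// Illustrative numbers only: with refclk = 24 MHz, pclk = 594 MHz,
// recout_width = 1920, req_per_swath_ub = 30 and vratio <= 1 (no ODM),
// refcyc_per_delivery = 24 * 1920 / 594 / 30 ~= 2.59 refclk cycles per
// request delivery.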
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
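// Illustrative encoding example, using the defaults set in get_surf_rq_param():
// chunk_bytes = 8192      -> chunk_size      = log2(8192) - 10     = 3
// min_chunk_bytes = 1024  -> min_chunk_size  = log2(1024) - 8 + 1  = 3
// meta_chunk_bytes = 2048 -> meta_chunk_size = log2(2048) - 10     = 1
// min_meta_chunk_bytes = 256  -> min_meta_chunk_size = log2(256) - 6 + 1 = 3
// dpte/mpte_group_bytes = 2048 -> dpte/mpte_group_size = log2(2048) - 6  = 5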
static void extract_rq_regs(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(
dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(
dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes
/ (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
(unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
256,
0) / 64.0; // 2/3 to chroma
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
}
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(
rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
256,
1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(
rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
256,
1) + 256;
}
if (rq_param->yuv420) {
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = false;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
req128_l = true;
req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
		// Note: we assume the config that is passed in will fit into
		// the detile buffer.
} else {
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
req128_l = false;
else
req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
}
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print(
"DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
__func__,
full_swath_bytes_packed_l);
dml_print(
"DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
__func__,
full_swath_bytes_packed_c);
}
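// Illustrative example (made-up sizes): for a non-yuv420 surface with
// full_swath_bytes_packed_l = 60KB and a 164KB detile buffer,
// total_swath_bytes = 120KB fits, so req128_l stays false (full 256B
// requests); at 90KB per swath the 180KB total would exceed the buffer
// and luma would fall back to 128B requests.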
static void get_meta_and_pte_attr(
struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int hostvm_enable,
unsigned int is_chroma)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element;
unsigned int bytes_per_element_y = get_bytes_per_element(
(enum source_format_class) (source_format),
false);
unsigned int bytes_per_element_c = get_bytes_per_element(
(enum source_format_class) (source_format),
true);
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_bytes;
unsigned int meta_blk_height;
unsigned int meta_blk_width;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
const unsigned int dpte_buf_in_pte_reqs =
mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma + mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
const unsigned int pde_proc_buffer_size_64k_reqs =
mode_lib->ip.pde_proc_buffer_size_64k_reqs;
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
unsigned int pde_buf_entries;
bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
Calculate256BBlockSizes(
(enum source_format_class) (source_format),
(enum dm_swizzle_mode) (tiling),
bytes_per_element_y,
bytes_per_element_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
// "-" in log is divide
// "/2" is like square root
// blk is vertical biased
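	// e.g. (illustrative): 64KB tile (log2_blk_bytes = 16), 4 bytes/element
	// (log2_bytes_per_element = 2), 8x8 blk256 (log2_blk256_height = 3):
	// log2_blk_height = 3 + ceil((16 - 8) / 2) = 7 and
	// log2_blk_width = 16 - 2 - 7 = 7, i.e. a 128x128 element block.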
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
if (!surf_vert) {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ blk256_width;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
} else {
rq_dlg_param->swath_width_ub = dml_round_to_multiple(
vp_height - 1,
blk256_height,
1) + blk256_height;
rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
* bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
* bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
	log2_meta_req_bytes = 6;	// meta request is 64b and covers an 8x8 block of meta elements
	// each 64b meta request for dcn is 8x8 meta elements and
	// a meta element covers one 256b block of the data surface.
	log2_meta_req_height = log2_blk256_height + 3;	// meta req is 8x8 bytes, each byte represents 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1
<< (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_bytes = 4096;
meta_blk_height = blk256_height * 64;
meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
meta_surface_bytes = meta_pitch
* (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ meta_blk_height) * bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(
meta_surface_bytes - vmpg_bytes,
8 * vmpg_bytes,
1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print(
"DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
__func__,
meta_pte_req_per_frame_ub);
dml_print(
"DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
__func__,
meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear) {
log2_vmpg_height = 0; // one line high
} else {
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
}
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
	// The dpte request dimensions in data elements are dpte_req_width x dpte_req_height
	// log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte req represents
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
pde_buf_entries =
yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
if (surf_linear) {
unsigned int dpte_row_height;
log2_dpte_row_height_linear = dml_floor(
dml_log2(
dml_min(
64 * 1024 * pde_buf_entries
/ bytes_per_element,
dpte_buf_in_pte_reqs
* dpte_req_width)
/ data_pitch),
1);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(
data_pitch * dpte_row_height - 1,
dpte_req_width,
1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
// the upper bound of the dpte_row_width without dependency on viewport position follows.
// for tiled mode, row height is the same as req height and row store up to vp size upper bound
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height =
(log2_blk_width < log2_dpte_req_width) ?
log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
rq_sizing_param->dpte_group_bytes = 2048;
}
//since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
(double) dpte_row_width_ub / dpte_group_width,
1);
}
static void get_surf_rq_param(
struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st *pipe_param,
bool is_chroma)
{
bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
	// FIXME: check if ppe applies to both luma and chroma in the 422 case
if (is_chroma) {
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
} else {
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
}
if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_half;
unsigned int src_hactive_half;
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
hactive_half = pipe_param->dest.hactive / 2;
if (is_chroma) {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half;
} else {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half;
}
if (access_dir == 0) {
vp_width = dml_min(full_src_vp_width, src_hactive_half);
dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
} else {
vp_height = dml_min(full_src_vp_width, src_hactive_half);
dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
}
dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
dml_print("DML_DLG: %s: hactive_half = %d\n", __func__, hactive_half);
dml_print("DML_DLG: %s: src_hactive_half = %d\n", __func__, src_hactive_half);
}
rq_sizing_param->chunk_bytes = 8192;
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(
mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma);
}
static void dml_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
|| pipe_param->src.source_format == dm_420_10;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
&(rq_param->sizing.rq_l),
&(rq_param->dlg.rq_l),
&(rq_param->misc.rq_l),
pipe_param,
0);
if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(
mode_lib,
&(rq_param->sizing.rq_c),
&(rq_param->dlg.rq_c),
&(rq_param->misc.rq_c),
pipe_param,
1);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
print__rq_params_st(mode_lib, rq_param);
}
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as-is.
// It would be nice to decouple this code from the hw register implementation and to extract the logic that is repeated for luma and chroma.
static void dml_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double dispclk_freq_in_mhz = clks->dispclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_dcfclk_mhz;
double t_calc_us;
double min_ttu_vblank;
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
// Scaling
unsigned int htaps_l;
unsigned int htaps_c;
double hratio_l;
double hratio_c;
double vratio_l;
double vratio_c;
bool scl_enable;
double line_time_in_us;
// double vinit_l;
// double vinit_c;
// double vinit_bot_l;
// double vinit_bot_c;
// unsigned int swath_height_l;
unsigned int swath_width_ub_l;
// unsigned int dpte_bytes_per_row_ub_l;
unsigned int dpte_groups_per_row_ub_l;
// unsigned int meta_pte_bytes_per_frame_ub_l;
// unsigned int meta_bytes_per_row_ub_l;
// unsigned int swath_height_c;
unsigned int swath_width_ub_c;
// unsigned int dpte_bytes_per_row_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int meta_chunks_per_row_ub_c;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int pixel_rate_delay_subtotal;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double line_wait;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double max_dst_y_per_vm_vblank;
double max_dst_y_per_row_vblank;
double lsw;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int meta_row_height_c;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
double refcyc_per_req_delivery_cur1;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
ASSERT(ref_freq_to_pix_freq < 4.0);
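// ref_freq_to_pix_freq is programmed in fixed point with 19 fractional bits;
// the assert above keeps the integer part in range.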
disp_dlg_regs->ref_freq_to_pix_freq =
(unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
* dml_pow(2, 8));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print(
"DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
__func__,
min_dcfclk_mhz);
dml_print(
"DML_DLG: %s: min_ttu_vblank = %3.2f\n",
__func__,
min_ttu_vblank);
dml_print(
"DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
__func__,
min_dst_y_ttu_vblank);
dml_print(
"DML_DLG: %s: t_calc_us = %3.2f\n",
__func__,
t_calc_us);
dml_print(
"DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
__func__,
disp_dlg_regs->min_dst_y_next_start);
dml_print(
"DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
__func__,
ref_freq_to_pix_freq);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
mode_422 = false; // FIXME
access_dir = (src->source_scan == dm_vert); // vp access direction: 0 = horizontal, 1 = vertical
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
if (scl_enable)
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
else
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
if (dout->dsc_enable) {
double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dispclk_delay_subtotal += dsc_delay;
}
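// Convert the DPPCLK- and DISPCLK-domain delays into pixel clock cycles.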
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start
- (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
<= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
// TODO: Where is this coming from?
if (interlaced)
vstartup_start = vstartup_start / 2;
// TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
if (vstartup_start >= min_vblank) {
dml_print(
"WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
__func__,
vblank_start,
vblank_end);
dml_print(
"WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
min_vblank = vstartup_start + 1;
dml_print(
"WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
__func__,
vstartup_start,
min_vblank);
}
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print(
"DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
__func__,
pixel_rate_delay_subtotal);
dml_print(
"DML_DLG: %s: dst_x_after_scaler = %d\n",
__func__,
dst_x_after_scaler);
dml_print(
"DML_DLG: %s: dst_y_after_scaler = %d\n",
__func__,
dst_y_after_scaler);
// Lwait
// TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
line_wait = mode_lib->soc.urgent_latency_pixel_data_only_us;
if (cstate_en)
line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
if (pstate_en)
line_wait = dml_max(
mode_lib->soc.dram_clock_change_latency_us
+ mode_lib->soc.urgent_latency_pixel_data_only_us, // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
line_wait);
line_wait = line_wait / line_time_in_us;
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(
mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_row_vblank = get_dst_y_per_row_vblank(
mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx);
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
max_dst_y_per_vm_vblank = 32.0;
max_dst_y_per_row_vblank = 16.0;
// magic!
if (htotal <= 75) {
min_vblank = 300;
max_dst_y_per_vm_vblank = 100.0;
max_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
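// Scaler-limited horizontal pixel rate: presumably the DPP can output two
// pixels per DPPCLK when not downscaling (hratio <= 1); with <= 6 htaps the
// throughput scales with 2 * hratio capped at 4, otherwise with hratio
// capped at 4.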
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// hsplit: either ODM combine or MPC combine
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print(
"DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
__func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n", __func__, full_recout_width);
dml_print(
"DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
__func__,
hscale_pixel_rate_l);
dml_print(
"DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_l);
dml_print(
"DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
__func__,
refcyc_per_line_delivery_l);
if (dual_plane) {
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print(
"DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_pre_c);
dml_print(
"DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
__func__,
refcyc_per_line_delivery_c);
}
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_l);
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
__func__,
refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_pre_c);
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
__func__,
refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
if (src->num_cursors > 0) {
calculate_ttu_cursor(
mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp) (src->cur0_bpp));
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
if (src->num_cursors > 1) {
calculate_ttu_cursor(
mode_lib,
&refcyc_per_req_delivery_pre_cur1,
&refcyc_per_req_delivery_cur1,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur1_src_width,
(enum cursor_bpp) (src->cur1_bpp));
}
// TTU - Misc
// all hard-coded
// Assignment to register structures
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
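// DST_Y_* values are programmed with 2 fractional bits (quarter-line
// precision); the VRATIO_PREFETCH values use 19 fractional bits.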
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
else
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
else
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int)dml_pow(2, 13));
}
if (src->dcc)
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
else
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
* ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
* htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
/ (double) vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
dml_print(
"DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int)dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
dml_print(
"DML: Trow: %fus\n",
line_time_in_us * (double)dpte_row_height_l / (double)vratio_l);
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
/ (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c =
(unsigned int) ((double) dpte_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
(unsigned int) ((double) meta_row_height_c / (double) vratio_c
* (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
refcyc_per_line_delivery_pre_l, 1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
refcyc_per_line_delivery_l, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
refcyc_per_line_delivery_pre_c, 1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
refcyc_per_line_delivery_c, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
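// TTU per-request delivery times are programmed with 10 fractional bits.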
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
(unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
* dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
(unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
* dml_pow(2, 10));
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
* ref_freq_to_pix_freq);
ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
// Get watermark and Tex.
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(
mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(
mode_lib,
e2e_pipe_param,
num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(
mode_lib,
e2e_pipe_param,
num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
&rq_param.dlg,
&dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
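// If not vertically downscaling, pace cursor requests over the cursor's
// horizontal active time; otherwise pace them against the scaler-limited
// pixel rate (refclk cycles per request in both cases).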
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0) {
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
/ (double) cur_req_per_width;
} else {
*refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
* (double) cur_src_width / hscale_pixel_rate_l
/ (double) cur_req_per_width;
}
dml_print(
"DML_DLG: %s: cur_req_width = %d\n",
__func__,
cur_req_width);
dml_print(
"DML_DLG: %s: cur_width_ub = %3.2f\n",
__func__,
cur_width_ub);
dml_print(
"DML_DLG: %s: cur_req_per_width = %3.2f\n",
__func__,
cur_req_per_width);
dml_print(
"DML_DLG: %s: hactive_cur = %3.2f\n",
__func__,
hactive_cur);
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_pre_cur);
dml_print(
"DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
__func__,
*refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../dml_inline_defs.h"
#include "../display_mode_vba.h"
#include "display_mode_vba_21.h"
/*
* NOTE:
* This file is gcc-parsable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
double DCFCLKDeepSleep;
unsigned int DPPPerPlane;
bool ScalerEnabled;
enum scan_direction_class SourceScan;
unsigned int BlockWidth256BytesY;
unsigned int BlockHeight256BytesY;
unsigned int BlockWidth256BytesC;
unsigned int BlockHeight256BytesC;
unsigned int InterlaceEnable;
unsigned int NumberOfCursors;
unsigned int VBlank;
unsigned int HTotal;
} Pipe;
typedef struct {
bool Enable;
unsigned int MaxPageTableLevels;
unsigned int CachedPageTableLevels;
} HostVM;
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
#define DCN21_MAX_420_IMAGE_WIDTH 4096
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat);
static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
// Super monster function with some 45 arguments
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
HostVM *myHostVM,
bool DynamicMetadataEnable,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
unsigned int *swath_width_luma_ub,
unsigned int *swath_width_chroma_ub,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static double CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int DETBufferSize,
unsigned int RequestHeight256Byte,
unsigned int SwathHeight,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixel,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlock,
unsigned int *MaxCompressedBlock,
unsigned int *Independent64ByteBlock);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidthY,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxPageTableLevels,
unsigned int HostVMCachedPageTableLevels,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame);
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatency,
double SREnterPlusExitTime);
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk);
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxPageTableLevels,
unsigned int HostVMCachedPageTableLevels,
bool GPUVMEnable,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth);
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceLumaBufferSize,
unsigned int WritebackInterfaceChromaBufferSize,
double DCFCLK,
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double *DCFCLKDeepSleep);
static void CalculateDETBufferSize(
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
unsigned int *DETBufferSizeY,
unsigned int *DETBufferSizeC);
static void CalculateUrgentBurstFactor(
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
unsigned int SwathWidthY,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioPreY,
double VRatioPreC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorCursorPre,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorLumaPre,
double *UrgentBurstFactorChroma,
double *UrgentBurstFactorChromaPre,
unsigned int *NotEnoughUrgentLatencyHiding,
unsigned int *NotEnoughUrgentLatencyHidingPre);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double BytePerPixelDETC[],
enum scan_direction_class SourceScan[],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[]);
static void CalculateMetaAndPTETimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int MetaChunkSize,
unsigned int MinMetaChunkSizeBytes,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
double VRatio[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
enum scan_direction_class SourceScan[],
unsigned int dpte_row_height[],
unsigned int dpte_row_height_chroma[],
unsigned int meta_row_width[],
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
unsigned int PixelPTEReqHeightY[],
unsigned int PixelPTEReqWidthC[],
unsigned int PixelPTEReqHeightC[],
unsigned int dpte_row_width_luma_ub[],
unsigned int dpte_row_width_chroma_ub[],
unsigned int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
unsigned int meta_pte_bytes_per_frame_ub_l[],
unsigned int meta_pte_bytes_per_frame_ub_c[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double TimePerMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[]);
static double CalculateExtraLatency(
double UrgentRoundTripAndOutOfOrderLatency,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
int HostVMCachedPageTableLevels);
void dml21_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
DisplayPipeConfiguration(mode_lib);
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double bpp,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
// valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
// valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
// valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, S, ix, wx, p, l0, a, ax, l,
Delay, pixels;
if (pixelFormat == dm_n422 || pixelFormat == dm_420)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_s422)
S = 1;
else
S = 0;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
p = 3 * wx - w;
l0 = ix / w;
a = ix + p * l0;
ax = (a + 2) / 3 + D + 6 + 1;
l = (ax + wx - 1) / wx;
if ((ix % w) == 0 && p != 0)
lstall = 1;
else
lstall = 0;
Delay = l * wx * (numSlices - 1) + ax + S + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotal,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCFormater,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int ScalerRecoutWidth,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
HostVM *myHostVM,
bool DynamicMetadataEnable,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
bool DCCEnable,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double BytePerPixelDETY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
double BytePerPixelDETC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool XFCEnabled,
double XFCRemoteSurfaceFlipDelay,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
unsigned int *swath_width_luma_ub,
unsigned int *swath_width_chroma_ub,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
double Tdm, LineTime, Tsetup;
double dst_y_prefetch_equ;
double Tsw_oto;
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double Tsw_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
double HostVMInefficiencyFactor;
unsigned int HostVMDynamicLevels;
if (GPUVMEnable == true && myHostVM->Enable == true) {
HostVMInefficiencyFactor =
PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
/ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevels = myHostVM->MaxPageTableLevels
- myHostVM->CachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevels = 0;
}
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK
+ DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK + DSCDelay;
if (myPipe->DPPPerPlane > 1)
*DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * myPipe->HTotal)) + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
*VUpdateOffsetPix = dml_ceil(myPipe->HTotal / 4.0, 1);
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / myPipe->DPPCLK + 3.0 / myPipe->DISPCLK);
*VUpdateWidthPix = (14.0 / myPipe->DCFCLKDeepSleep + 12.0 / myPipe->DPPCLK + TotalRepeaterDelayTime)
* myPipe->PixelClock;
*VReadyOffsetPix = dml_max(
150.0 / myPipe->DPPCLK,
TotalRepeaterDelayTime + 20.0 / myPipe->DCFCLKDeepSleep + 10.0 / myPipe->DPPCLK)
* myPipe->PixelClock;
Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / myPipe->PixelClock;
LineTime = (double) myPipe->HTotal / myPipe->PixelClock;
if (DynamicMetadataEnable) {
double Tdmbf, Tdmec, Tdmsks;
Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / myPipe->DISPCLK;
Tdmec = LineTime;
if (DynamicMetadataLinesBeforeActiveRequired == -1)
Tdmsks = myPipe->VBlank * LineTime / 2.0;
else
Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
if (myPipe->InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
Tdmsks = Tdmsks / 2;
if (VStartup * LineTime
< Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
MyError = true;
*VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
} else
*VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
} else
Tdm = 0;
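	// Tno_bw is the portion of the prefetch window during which no data
	// bandwidth is available: deep GPU VM page-table walks when GPUVM is
	// enabled, otherwise one line time, or a quarter line time with DCC.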
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3)
*Tno_bw = UrgentExtraLatency + UrgentLatency * ((GPUVMPageTableLevels - 2) * (myHostVM->MaxPageTableLevels + 1) - 1);
else
*Tno_bw = 0;
} else if (!DCCEnable)
*Tno_bw = LineTime;
else
*Tno_bw = LineTime / 4;
dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
- (Tsetup + Tdm) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
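	// Two candidate schedules are built: the *_oto values follow directly from
	// the swath fetch time, while dst_y_prefetch_equ comes from the time left
	// between VStartup and active; the smaller destination line count is used.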
Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
if (myPipe->SourceScan == dm_horz) {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockWidth256BytesY) + myPipe->BlockWidth256BytesY;
if (myPipe->BlockWidth256BytesC > 0)
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
if (myPipe->BlockHeight256BytesC > 0)
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
prefetch_bw_oto = (PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto;
if (GPUVMEnable == true) {
Tvm_oto = dml_max(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
dml_max(UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1),
LineTime / 4.0));
} else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
Tr0_oto = dml_max(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
dml_max(UrgentLatency * (HostVMDynamicLevels + 1), dml_max(LineTime - Tvm_oto, LineTime / 4)));
} else
Tr0_oto = (LineTime - Tvm_oto) / 2.0;
Tvm_oto_lines = dml_ceil(4 * Tvm_oto / LineTime, 1) / 4.0;
Tr0_oto_lines = dml_ceil(4 * Tr0_oto / LineTime, 1) / 4.0;
Tsw_oto_lines = dml_ceil(4 * Tsw_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Tsw_oto_lines + 0.75;
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
if (dst_y_prefetch_oto < dst_y_prefetch_equ)
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
else
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
// Limit to prevent overflow in DST_Y_PREFETCH register
*DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75);
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: TCalc: %f\n", TCalc);
dml_print("DML: TWait: %f\n", TWait);
dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: Tsetup: %f\n", Tsetup);
dml_print("DML: Tdm: %f\n", Tdm);
dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
dml_print("DML: HTotal: %d\n", myPipe->HTotal);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
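		// Four candidate prefetch bandwidths: (1) VM, row and pixel data all paid
		// for out of the prefetch window, (2) row fetches charged as fixed
		// urgent-latency time instead of bandwidth, (3) the VM fetch charged as
		// fixed time, (4) both charged as time, pixel data only. The candidates
		// are tried in order, picking the first whose implied VM and row fetch
		// times are consistent with their latency floors.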
if (*DestinationLinesForPrefetch > 1) {
double PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1)
+ PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2))
/ (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
double PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
*swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) +
PrefetchSourceLinesC * *swath_width_chroma_ub *
dml_ceil(BytePerPixelDETC, 2)) /
(*DestinationLinesForPrefetch * LineTime - *Tno_bw - 2 *
UrgentLatency * (1 + HostVMDynamicLevels));
double PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow
* HostVMInefficiencyFactor + PrefetchSourceLinesY *
*swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) +
PrefetchSourceLinesC * *swath_width_chroma_ub *
dml_ceil(BytePerPixelDETC, 2)) /
(*DestinationLinesForPrefetch * LineTime -
UrgentExtraLatency - UrgentLatency * (GPUVMPageTableLevels
* (HostVMDynamicLevels + 1) - 1));
double PrefetchBandwidth4 = (PrefetchSourceLinesY * *swath_width_luma_ub *
dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC *
*swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) /
(*DestinationLinesForPrefetch * LineTime -
UrgentExtraLatency - UrgentLatency * (GPUVMPageTableLevels
* (HostVMDynamicLevels + 1) - 1) - 2 * UrgentLatency *
(1 + HostVMDynamicLevels));
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (*DestinationLinesForPrefetch - dml_ceil(Tsw_oto_lines, 1) / 4.0 - 0.75) * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / ((*DestinationLinesForPrefetch - dml_ceil(Tsw_oto_lines, 1) / 4.0 - 0.75) * LineTime - *Tno_bw);
}
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1 >= UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= UrgentLatency * (1 + HostVMDynamicLevels)) {
*PrefetchBandwidth = PrefetchBandwidth1;
} else if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2 >= UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < UrgentLatency * (1 + HostVMDynamicLevels)) {
*PrefetchBandwidth = PrefetchBandwidth2;
} else if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 < UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1) && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= UrgentLatency * (1 + HostVMDynamicLevels)) {
*PrefetchBandwidth = PrefetchBandwidth3;
} else {
*PrefetchBandwidth = PrefetchBandwidth4;
}
if (GPUVMEnable) {
TimeForFetchingMetaPTE = dml_max(*Tno_bw + (double) PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / *PrefetchBandwidth,
dml_max(UrgentExtraLatency + UrgentLatency * (GPUVMPageTableLevels * (HostVMDynamicLevels + 1) - 1), LineTime / 4));
} else {
// 5/30/2018 - This was an optimization requested from Sy but now NumberOfCursors is no longer a factor
// so if this needs to be reinstated, then it should be officially done in the VBA code as well.
// if (mode_lib->NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingMetaPTE = LineTime / 4;
// else
// TimeForFetchingMetaPTE = 0.0;
}
if ((GPUVMEnable == true || DCCEnable == true)) {
			TimeForFetchingRowInVBlank = dml_max(
					(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / *PrefetchBandwidth,
					dml_max(UrgentLatency * (1 + HostVMDynamicLevels),
						dml_max((LineTime - TimeForFetchingMetaPTE) / 2.0, LineTime / 4.0)));
} else {
// See note above dated 5/30/2018
// if (NumberOfCursors > 0 || XFCEnabled)
TimeForFetchingRowInVBlank = (LineTime - TimeForFetchingMetaPTE) / 2.0;
// else // TODO: Did someone else add this??
// TimeForFetchingRowInVBlank = 0.0;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch
// See note above dated 5/30/2018
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || DCCEnable) ?
(*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) :
0.0); // TODO: Did someone else add this??
if (LinesToRequestPrefetchPixelData > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
					*VRatioPrefetchY = dml_max(
							(double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData,
							(double) MaxNumSwathY * SwathHeightY
								/ (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
*VRatioPrefetchY = 0;
}
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC
/ LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
if ((SwathHeightC > 4)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
					*VRatioPrefetchC = dml_max(
							*VRatioPrefetchC,
							(double) MaxNumSwathC * SwathHeightC
								/ (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
*VRatioPrefetchC = 0;
}
}
*RequiredPrefetchPixDataBWLuma = myPipe->DPPPerPlane
* (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData
* dml_ceil(BytePerPixelDETY, 1)
* *swath_width_luma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = myPipe->DPPPerPlane
* (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData
* dml_ceil(BytePerPixelDETC, 2)
* *swath_width_chroma_ub / LineTime;
} else {
MyError = true;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
dml_print("DML: Tvm: %fus\n", TimeForFetchingMetaPTE);
dml_print("DML: Tr0: %fus\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus\n", (double)(*DestinationLinesForPrefetch) * LineTime - TimeForFetchingMetaPTE - TimeForFetchingRowInVBlank);
dml_print("DML: Tpre: %fus\n", (double)(*DestinationLinesForPrefetch) * LineTime);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
MyError = true;
}
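	// Bandwidth implied by fetching the frame-level VM data and the row of
	// PTEs/metadata within their allotted vblank lines; the larger of the two
	// is reported as prefetch_vmrow_bw.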
{
double prefetch_vm_bw;
double prefetch_row_bw;
if (PDEAndMetaPTEBytesFrame == 0) {
prefetch_vm_bw = 0;
} else if (*DestinationLinesToRequestVMInVBlank > 0) {
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
} else {
prefetch_vm_bw = 0;
MyError = true;
}
if (MetaRowByte + PixelPTEBytesPerRow == 0) {
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
} else {
prefetch_row_bw = 0;
MyError = true;
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
return MyError;
}
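
// The DFS can only produce clocks of the form VCOSpeed * 4 / n for integer n;
// round a requested clock up or down to the nearest such step.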
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
}
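
// Determine the DCC request configuration for a surface: whether 256-byte or
// 128-byte (contiguous or non-contiguous) requests are used, based on pixel
// size, tiling format, swath height and scan direction, and return the maximum
// compression ratio along with the block-size parameters.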
static double CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int DETBufferSize,
unsigned int RequestHeight256Byte,
unsigned int SwathHeight,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixel,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlock,
unsigned int *MaxCompressedBlock,
unsigned int *Independent64ByteBlock)
{
double MaximumDCCCompressionSurface = 0.0;
enum {
REQ_256Bytes,
REQ_128BytesNonContiguous,
REQ_128BytesContiguous,
REQ_NA
} Request = REQ_NA;
if (DCCEnabled == true) {
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
			if (DETBufferSize >= RequestHeight256Byte * ViewportWidth * BytePerPixel
					&& DETBufferSize >= 256 / RequestHeight256Byte * ViewportHeight) {
				Request = REQ_256Bytes;
			} else if ((DETBufferSize < RequestHeight256Byte * ViewportWidth * BytePerPixel
					&& (BytePerPixel == 2 || BytePerPixel == 4))
					|| (DETBufferSize < 256 / RequestHeight256Byte * ViewportHeight
						&& BytePerPixel == 8
						&& (TilingFormat == dm_sw_4kb_d || TilingFormat == dm_sw_4kb_d_x
							|| TilingFormat == dm_sw_var_d || TilingFormat == dm_sw_var_d_x
							|| TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x
							|| TilingFormat == dm_sw_64kb_d_t || TilingFormat == dm_sw_64kb_r_x))) {
				Request = REQ_128BytesNonContiguous;
			} else {
				Request = REQ_128BytesContiguous;
			}
} else {
if (BytePerPixel == 1) {
if (ScanOrientation == dm_vert || SwathHeight == 16) {
Request = REQ_256Bytes;
} else {
Request = REQ_128BytesContiguous;
}
} else if (BytePerPixel == 2) {
if ((ScanOrientation == dm_vert && SwathHeight == 16) || (ScanOrientation != dm_vert && SwathHeight == 8)) {
Request = REQ_256Bytes;
} else if (ScanOrientation == dm_vert) {
Request = REQ_128BytesContiguous;
} else {
Request = REQ_128BytesNonContiguous;
}
} else if (BytePerPixel == 4) {
if (SwathHeight == 8) {
Request = REQ_256Bytes;
} else if (ScanOrientation == dm_vert) {
Request = REQ_128BytesContiguous;
} else {
Request = REQ_128BytesNonContiguous;
}
} else if (BytePerPixel == 8) {
if (TilingFormat == dm_sw_4kb_d || TilingFormat == dm_sw_4kb_d_x
|| TilingFormat == dm_sw_var_d
|| TilingFormat == dm_sw_var_d_x
|| TilingFormat == dm_sw_64kb_d
|| TilingFormat == dm_sw_64kb_d_x
|| TilingFormat == dm_sw_64kb_d_t
|| TilingFormat == dm_sw_64kb_r_x) {
if ((ScanOrientation == dm_vert && SwathHeight == 8)
|| (ScanOrientation != dm_vert
&& SwathHeight == 4)) {
Request = REQ_256Bytes;
} else if (ScanOrientation != dm_vert) {
Request = REQ_128BytesContiguous;
} else {
Request = REQ_128BytesNonContiguous;
}
} else {
if (ScanOrientation != dm_vert || SwathHeight == 8) {
Request = REQ_256Bytes;
} else {
Request = REQ_128BytesContiguous;
}
}
}
}
} else {
Request = REQ_NA;
}
if (Request == REQ_256Bytes) {
*MaxUncompressedBlock = 256;
*MaxCompressedBlock = 256;
*Independent64ByteBlock = false;
MaximumDCCCompressionSurface = 4.0;
} else if (Request == REQ_128BytesContiguous) {
*MaxUncompressedBlock = 128;
*MaxCompressedBlock = 128;
*Independent64ByteBlock = false;
MaximumDCCCompressionSurface = 2.0;
} else if (Request == REQ_128BytesNonContiguous) {
*MaxUncompressedBlock = 256;
*MaxCompressedBlock = 64;
*Independent64ByteBlock = true;
MaximumDCCCompressionSurface = 4.0;
} else {
*MaxUncompressedBlock = 0;
*MaxCompressedBlock = 0;
*Independent64ByteBlock = 0;
MaximumDCCCompressionSurface = 0.0;
}
return MaximumDCCCompressionSurface;
}
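
// Number of source lines that must be fetched for prefetch: the init prefill
// lines implied by the scaler taps and ratio, rounded out to whole swaths plus
// the trailing partial swath.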
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
unsigned int MaxPartialSwath;
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!mode_lib->vba.IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
% SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print(
"WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
% SwathHeight;
}
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
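
// Size the GPU VM and DCC metadata fetches for one surface: meta row
// width/height and bytes, PTE request geometry, bytes of PTEs per row, and the
// frame-level PDE/meta-PTE bytes (returned), plus whether the PTE request
// buffer is exceeded.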
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int ViewportWidth,
unsigned int ViewportHeight,
unsigned int SwathWidth,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxPageTableLevels,
unsigned int HostVMCachedPageTableLevels,
unsigned int VMMPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
unsigned int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
unsigned int *DPDE0BytesFrame,
unsigned int *MetaPTEBytesFrame)
{
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
unsigned int MacroTileHeight;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
unsigned int PixelPTEReqHeightPTEs = 0;
if (DCCEnable == true) {
*MetaRequestHeight = 8 * BlockHeight256Bytes;
*MetaRequestWidth = 8 * BlockWidth256Bytes;
if (ScanDirection == dm_horz) {
*meta_row_height = *MetaRequestHeight;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth)
+ *MetaRequestWidth;
*MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = *MetaRequestWidth;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight)
+ *MetaRequestHeight;
*MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
}
if (ScanDirection == dm_horz) {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
} else {
DCCMetaSurfaceBytes = DCCMetaPitch
* (dml_ceil(
(double) ViewportHeight - 1,
64 * BlockHeight256Bytes)
+ 64 * BlockHeight256Bytes) * BytePerPixel
/ 256;
}
if (GPUVMEnable == true) {
*MetaPTEBytesFrame = (dml_ceil(
(double) (DCCMetaSurfaceBytes - VMMPageSize)
/ (8 * VMMPageSize),
1) + 1) * 64;
MPDEBytesFrame = 128 * ((mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) - 2);
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
|| SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
MacroTileSizeBytes = 4096;
MacroTileHeight = 4 * BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
|| SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
|| SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
|| SurfaceTiling == dm_sw_64kb_r_x) {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 262144;
MacroTileHeight = 32 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && (mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) > 2) {
if (ScanDirection == dm_horz) {
*DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
} else {
*DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
}
ExtraDPDEBytesFrame = 128 * ((mode_lib->vba.GPUVMMaxPageTableLevels + 1) * (mode_lib->vba.HostVMMaxPageTableLevels + 1) - 3);
} else {
*DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame
+ ExtraDPDEBytesFrame;
if (HostVMEnable == true) {
PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * (HostVMMaxPageTableLevels - HostVMCachedPageTableLevels));
}
if (GPUVMEnable == true) {
double FractionOfPTEReturnDrop;
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = 1;
*PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
				FractionOfPTEReturnDrop = 7.0 / 8.0;
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
*PixelPTEReqWidth = 16 * BlockWidth256Bytes;
*PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height = dml_min(128,
1 << (unsigned int) dml_floor(
dml_log2(
(double) PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch),
1));
*dpte_row_width_ub = (dml_ceil((double) (Pitch * *dpte_row_height - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection == dm_horz) {
*dpte_row_height = *PixelPTEReqHeight;
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else {
*dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
}
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
<= 64 * PTEBufferSizeInRequests) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
} else {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %d\n", *MetaPTEBytesFrame);
if (HostVMEnable == true) {
*PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * (HostVMMaxPageTableLevels - HostVMCachedPageTableLevels));
}
if (HostVMEnable == true) {
*vm_group_bytes = 512;
*dpte_group_bytes = 512;
} else if (GPUVMEnable == true) {
*vm_group_bytes = 2048;
if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection != dm_horz) {
*dpte_group_bytes = 512;
} else {
*dpte_group_bytes = 2048;
}
} else {
*vm_group_bytes = 0;
*dpte_group_bytes = 0;
}
return PDEAndMetaPTEBytesFrame;
}
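
// Top-level mode programming: derive DISPCLK/DPPCLK/DSCCLK, per-plane swath
// and prefetch parameters, iterate VStartup until a supportable prefetch (and,
// if required, immediate flip) schedule is found, then compute watermarks and
// pixel/PTE delivery times.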
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
unsigned int j, k;
mode_lib->vba.WritebackDISPCLK = 0.0;
mode_lib->vba.DISPCLKWithRamping = 0;
mode_lib->vba.DISPCLKWithoutRamping = 0;
mode_lib->vba.GlobalDPPCLK = 0.0;
// DISPCLK and DPPCLK Calculation
//
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k]) {
mode_lib->vba.WritebackDISPCLK =
dml_max(
mode_lib->vba.WritebackDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.HRatio[k] > 1) {
locals->PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1));
} else {
locals->PSCL_THROUGHPUT_LUMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPLuma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_THROUGHPUT_LUMA[k],
1.0));
if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPLuma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
locals->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
locals->DPPCLKUsingSingleDPP[k] =
mode_lib->vba.DPPCLKUsingSingleDPPLuma;
} else {
if (mode_lib->vba.HRatio[k] > 1) {
locals->PSCL_THROUGHPUT_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
locals->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
mode_lib->vba.DPPCLKUsingSingleDPPChroma =
mode_lib->vba.PixelClock[k]
* dml_max(
mode_lib->vba.VTAPsChroma[k]
/ 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]
/ 2),
dml_max(
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ 4
/ locals->PSCL_THROUGHPUT_CHROMA[k],
1.0));
if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
&& mode_lib->vba.DPPCLKUsingSingleDPPChroma
< 2 * mode_lib->vba.PixelClock[k]) {
mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
* mode_lib->vba.PixelClock[k];
}
locals->DPPCLKUsingSingleDPP[k] = dml_max(
mode_lib->vba.DPPCLKUsingSingleDPPLuma,
mode_lib->vba.DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] != k)
continue;
		if (mode_lib->vba.ODMCombineEnabled[k]) {
			mode_lib->vba.DISPCLKWithRamping = dml_max(
					mode_lib->vba.DISPCLKWithRamping,
					mode_lib->vba.PixelClock[k] / 2
						* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100)
						* (1 + mode_lib->vba.DISPCLKRampingMargin / 100));
			mode_lib->vba.DISPCLKWithoutRamping = dml_max(
					mode_lib->vba.DISPCLKWithoutRamping,
					mode_lib->vba.PixelClock[k] / 2
						* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100));
		} else {
			mode_lib->vba.DISPCLKWithRamping = dml_max(
					mode_lib->vba.DISPCLKWithRamping,
					mode_lib->vba.PixelClock[k]
						* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100)
						* (1 + mode_lib->vba.DISPCLKRampingMargin / 100));
			mode_lib->vba.DISPCLKWithoutRamping = dml_max(
					mode_lib->vba.DISPCLKWithoutRamping,
					mode_lib->vba.PixelClock[k]
						* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100));
		}
}
mode_lib->vba.DISPCLKWithRamping = dml_max(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.WritebackDISPCLK);
mode_lib->vba.DISPCLKWithoutRamping = dml_max(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.WritebackDISPCLK);
ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
mode_lib->vba.DISPCLKWithoutRamping,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states - 1].dispclk_mhz,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
> mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
} else {
mode_lib->vba.DISPCLK_calculated =
mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
}
DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.DPPCLK_calculated[k] = locals->DPPCLKUsingSingleDPP[k]
/ mode_lib->vba.DPPPerPlane[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
mode_lib->vba.GlobalDPPCLK = dml_max(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DPPCLK_calculated[k]);
}
mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
mode_lib->vba.GlobalDPPCLK,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
* dml_ceil(
mode_lib->vba.DPPCLK_calculated[k] * 255
/ mode_lib->vba.GlobalDPPCLK,
1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
}
// Urgent and B P-State/DRAM Clock Change Watermark
DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourceScan[k] == dm_horz)
locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
else
locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
locals->SwathWidthY[k] = dml_min(
(double) locals->SwathWidthSingleDPPY[k],
dml_round(
mode_lib->vba.HActive[k] / 2.0
* mode_lib->vba.HRatio[k]));
else
locals->SwathWidthY[k] = locals->SwathWidthSingleDPPY[k]
/ mode_lib->vba.DPPPerPlane[k];
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
locals->BytePerPixelDETY[k] = 8;
locals->BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
locals->BytePerPixelDETY[k] = 4;
locals->BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
locals->BytePerPixelDETY[k] = 2;
locals->BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
locals->BytePerPixelDETY[k] = 1;
locals->BytePerPixelDETC[k] = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
locals->BytePerPixelDETY[k] = 1;
locals->BytePerPixelDETC[k] = 2;
} else { // dm_420_10
locals->BytePerPixelDETY[k] = 4.0 / 3.0;
locals->BytePerPixelDETC[k] = 8.0 / 3.0;
}
}
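	// Active read bandwidth per plane: swath width * bytes per pixel / line
	// time, scaled by the vertical ratio (chroma at half width and half ratio).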
mode_lib->vba.TotalDataReadBandwidth = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
locals->ReadBandwidthPlaneLuma[k] = locals->SwathWidthSingleDPPY[k]
* dml_ceil(locals->BytePerPixelDETY[k], 1)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k];
locals->ReadBandwidthPlaneChroma[k] = locals->SwathWidthSingleDPPY[k]
/ 2 * dml_ceil(locals->BytePerPixelDETC[k], 2)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
* mode_lib->vba.VRatio[k] / 2;
DTRACE(
" read_bw[%i] = %fBps",
k,
locals->ReadBandwidthPlaneLuma[k]
+ locals->ReadBandwidthPlaneChroma[k]);
mode_lib->vba.TotalDataReadBandwidth += locals->ReadBandwidthPlaneLuma[k]
+ locals->ReadBandwidthPlaneChroma[k];
}
// DCFCLK Deep Sleep
CalculateDCFCLKDeepSleep(
mode_lib,
mode_lib->vba.NumberOfActivePlanes,
locals->BytePerPixelDETY,
locals->BytePerPixelDETC,
mode_lib->vba.VRatio,
locals->SwathWidthY,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.HRatio,
mode_lib->vba.PixelClock,
locals->PSCL_THROUGHPUT_LUMA,
locals->PSCL_THROUGHPUT_CHROMA,
locals->DPPCLK,
&mode_lib->vba.DCFCLKDeepSleep);
// DSCCLK
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
locals->DSCCLK_calculated[k] = 0.0;
} else {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k] == dm_n422)
mode_lib->vba.DSCFormatFactor = 2;
else
mode_lib->vba.DSCFormatFactor = 1;
if (mode_lib->vba.ODMCombineEnabled[k])
locals->DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 6
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
else
locals->DSCCLK_calculated[k] =
mode_lib->vba.PixelClockBackEnd[k] / 3
/ mode_lib->vba.DSCFormatFactor
/ (1
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100);
}
}
// DSC Delay
// TODO
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
double bpp = mode_lib->vba.OutputBpp[k];
unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
if (!mode_lib->vba.ODMCombineEnabled[k]) {
locals->DSCDelay[k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
locals->DSCDelay[k] =
2
* (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
bpp,
dml_ceil(
(double) mode_lib->vba.HActive[k]
/ mode_lib->vba.NumberOfDSCSlices[k],
1),
slices / 2.0,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]));
}
locals->DSCDelay[k] = locals->DSCDelay[k]
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.PixelClockBackEnd[k];
} else {
locals->DSCDelay[k] = 0;
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.DSCEnabled[j])
locals->DSCDelay[k] = locals->DSCDelay[j];
// Prefetch
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PixelPTEBytesPerRowY;
unsigned int MetaRowByteY;
unsigned int MetaRowByteC;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int PixelPTEBytesPerRowC;
bool PTEBufferSizeNotExceededY;
bool PTEBufferSizeNotExceededC;
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelDETY[k], 1),
dml_ceil(locals->BytePerPixelDETC[k], 2),
&locals->BlockHeight256BytesY[k],
&locals->BlockHeight256BytesC[k],
&locals->BlockWidth256BytesY[k],
&locals->BlockWidth256BytesC[k]);
locals->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.ViewportYStartY[k],
&locals->VInitPreFillY[k],
&locals->MaxNumSwathY[k]);
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
PDEAndMetaPTEBytesFrameC =
CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
locals->BlockHeight256BytesC[k],
locals->BlockWidth256BytesC[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(
locals->BytePerPixelDETC[k],
2),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2,
mode_lib->vba.ViewportHeight[k] / 2,
locals->SwathWidthY[k] / 2,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsChroma,
mode_lib->vba.PitchC[k],
mode_lib->vba.DCCMetaPitchC[k],
&locals->MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&PTEBufferSizeNotExceededC,
&locals->dpte_row_width_chroma_ub[k],
&locals->dpte_row_height_chroma[k],
&locals->meta_req_width_chroma[k],
&locals->meta_req_height_chroma[k],
&locals->meta_row_width_chroma[k],
&locals->meta_row_height_chroma[k],
&locals->vm_group_bytes_chroma,
&locals->dpte_group_bytes_chroma,
&locals->PixelPTEReqWidthC[k],
&locals->PixelPTEReqHeightC[k],
&locals->PTERequestSizeC[k],
&locals->dpde0_bytes_per_frame_ub_c[k],
&locals->meta_pte_bytes_per_frame_ub_c[k]);
locals->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
mode_lib->vba.SwathHeightC[k],
mode_lib->vba.ViewportYStartC[k],
&locals->VInitPreFillC[k],
&locals->MaxNumSwathC[k]);
} else {
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
locals->MaxNumSwathC[k] = 0;
locals->PrefetchSourceLinesC[k] = 0;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
locals->BlockHeight256BytesY[k],
locals->BlockWidth256BytesY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelDETY[k], 1),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
locals->SwathWidthY[k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.VMMPageSize,
locals->PTEBufferSizeInRequestsForLuma,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&locals->MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&PTEBufferSizeNotExceededY,
&locals->dpte_row_width_luma_ub[k],
&locals->dpte_row_height[k],
&locals->meta_req_width[k],
&locals->meta_req_height[k],
&locals->meta_row_width[k],
&locals->meta_row_height[k],
&locals->vm_group_bytes[k],
&locals->dpte_group_bytes[k],
&locals->PixelPTEReqWidthY[k],
&locals->PixelPTEReqHeightY[k],
&locals->PTERequestSizeY[k],
&locals->dpde0_bytes_per_frame_ub_l[k],
&locals->meta_pte_bytes_per_frame_ub_l[k]);
locals->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
locals->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ PDEAndMetaPTEBytesFrameC;
locals->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
MetaRowByteY,
MetaRowByteC,
locals->meta_row_height[k],
locals->meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
locals->dpte_row_height[k],
locals->dpte_row_height_chroma[k],
&locals->meta_row_bw[k],
&locals->dpte_row_bw[k]);
}
mode_lib->vba.TotalDCCActiveDPP = 0;
mode_lib->vba.TotalActiveDPP = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
if (mode_lib->vba.DCCEnable[k])
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ mode_lib->vba.DPPPerPlane[k];
}
mode_lib->vba.UrgentOutOfOrderReturnPerChannel = dml_max3(
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
(mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel
* mode_lib->vba.NumberOfChannels
/ mode_lib->vba.ReturnBW;
mode_lib->vba.UrgentExtraLatency = CalculateExtraLatency(
mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency,
mode_lib->vba.TotalActiveDPP,
mode_lib->vba.PixelChunkSizeInKByte,
mode_lib->vba.TotalDCCActiveDPP,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.ReturnBW,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.DPPPerPlane,
locals->dpte_group_bytes,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels);
mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k])
/ mode_lib->vba.DISPCLK;
} else
locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[j] == k
&& mode_lib->vba.WritebackEnable[j] == true) {
locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
dml_max(
locals->WritebackDelay[mode_lib->vba.VoltageLevel][k],
mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[j],
mode_lib->vba.WritebackHRatio[j],
mode_lib->vba.WritebackVRatio[j],
mode_lib->vba.WritebackLumaHTaps[j],
mode_lib->vba.WritebackLumaVTaps[j],
mode_lib->vba.WritebackChromaHTaps[j],
mode_lib->vba.WritebackChromaVTaps[j],
mode_lib->vba.WritebackDestinationWidth[j])
/ mode_lib->vba.DISPCLK);
}
}
}
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j)
locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] =
locals->WritebackDelay[mode_lib->vba.VoltageLevel][j];
mode_lib->vba.VStartupLines = 13;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
locals->MaxVStartupLines[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[mode_lib->vba.VoltageLevel][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
locals->MaximumMaxVStartupLines = dml_max(locals->MaximumMaxVStartupLines, locals->MaxVStartupLines[k]);
// We don't really care to iterate between the various prefetch modes
//mode_lib->vba.PrefetchERROR = CalculateMinAndMaxPrefetchMode(mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &mode_lib->vba.MinPrefetchMode, &mode_lib->vba.MaxPrefetchMode);
mode_lib->vba.UrgentLatency = dml_max3(mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.UrgentLatencyPixelMixedWithVMData, mode_lib->vba.UrgentLatencyVMDataOnly);
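	// Iterate the prefetch schedule: starting from VStartupLines, compute the
	// schedule for every plane, check total read bandwidth (with and without
	// the urgent burst factors) and immediate-flip feasibility, and bump
	// VStartupLines until the schedule is supported or MaximumMaxVStartupLines
	// is reached.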
do {
double MaxTotalRDBandwidth = 0;
double MaxTotalRDBandwidthNoUrgentBurst = 0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
double TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
Pipe myPipe;
HostVM myHostVM;
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
locals->SwathWidthY[k],
dml_ceil(
locals->BytePerPixelDETY[k],
1),
mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
}
myPipe.DPPCLK = locals->DPPCLK[k];
myPipe.DISPCLK = mode_lib->vba.DISPCLK;
myPipe.PixelClock = mode_lib->vba.PixelClock[k];
myPipe.DCFCLKDeepSleep = mode_lib->vba.DCFCLKDeepSleep;
myPipe.DPPPerPlane = mode_lib->vba.DPPPerPlane[k];
myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
myPipe.SourceScan = mode_lib->vba.SourceScan[k];
myPipe.BlockWidth256BytesY = locals->BlockWidth256BytesY[k];
myPipe.BlockHeight256BytesY = locals->BlockHeight256BytesY[k];
myPipe.BlockWidth256BytesC = locals->BlockWidth256BytesC[k];
myPipe.BlockHeight256BytesC = locals->BlockHeight256BytesC[k];
myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
myPipe.HTotal = mode_lib->vba.HTotal[k];
myHostVM.Enable = mode_lib->vba.HostVMEnable;
myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
mode_lib->vba.ErrorResult[k] =
CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
locals->DSCDelay[k],
mode_lib->vba.DPPCLKDelaySubtotal,
mode_lib->vba.DPPCLKDelaySCL,
mode_lib->vba.DPPCLKDelaySCLLBOnly,
mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelayCNVCCursor,
mode_lib->vba.DISPCLKDelaySubtotal,
(unsigned int) (locals->SwathWidthY[k]
/ mode_lib->vba.HRatio[k]),
mode_lib->vba.OutputFormat[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
dml_min(mode_lib->vba.VStartupLines, locals->MaxVStartupLines[k]),
locals->MaxVStartupLines[k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
&myHostVM,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatency,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.TCalc,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
locals->PrefetchSourceLinesY[k],
locals->SwathWidthY[k],
locals->BytePerPixelDETY[k],
locals->VInitPreFillY[k],
locals->MaxNumSwathY[k],
locals->PrefetchSourceLinesC[k],
locals->BytePerPixelDETC[k],
locals->VInitPreFillC[k],
locals->MaxNumSwathC[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
&locals->DSTXAfterScaler[k],
&locals->DSTYAfterScaler[k],
&locals->DestinationLinesForPrefetch[k],
&locals->PrefetchBandwidth[k],
&locals->DestinationLinesToRequestVMInVBlank[k],
&locals->DestinationLinesToRequestRowInVBlank[k],
&locals->VRatioPrefetchY[k],
&locals->VRatioPrefetchC[k],
&locals->RequiredPrefetchPixDataBWLuma[k],
&locals->RequiredPrefetchPixDataBWChroma[k],
&locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
&locals->Tno_bw[k],
&locals->prefetch_vmrow_bw[k],
&locals->swath_width_luma_ub[k],
&locals->swath_width_chroma_ub[k],
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
if (mode_lib->vba.BlendingAndTiming[k] == k) {
locals->VStartup[k] = dml_min(
mode_lib->vba.VStartupLines,
locals->MaxVStartupLines[k]);
if (locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
!= 0) {
locals->VStartup[k] =
locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
}
} else {
locals->VStartup[k] =
dml_min(
mode_lib->vba.VStartupLines,
locals->MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
}
}
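		// Per-plane cursor bandwidth and urgent burst factors, then accumulate
		// the worst-case demand among row/VM prefetch, active reads and
		// prefetch reads into MaxTotalRDBandwidth.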
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
unsigned int m;
locals->cursor_bw[k] = 0;
locals->cursor_bw_pre[k] = 0;
for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
locals->cursor_bw[k] += mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m] / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
locals->cursor_bw_pre[k] += mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m] / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * locals->VRatioPrefetchY[k];
}
CalculateUrgentBurstFactor(
mode_lib->vba.DETBufferSizeInKByte[0],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
locals->SwathWidthY[k],
mode_lib->vba.HTotal[k] /
mode_lib->vba.PixelClock[k],
mode_lib->vba.UrgentLatency,
mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0] + mode_lib->vba.CursorWidth[k][1],
dml_max(mode_lib->vba.CursorBPP[k][0], mode_lib->vba.CursorBPP[k][1]),
mode_lib->vba.VRatio[k],
locals->VRatioPrefetchY[k],
locals->VRatioPrefetchC[k],
locals->BytePerPixelDETY[k],
locals->BytePerPixelDETC[k],
&locals->UrgentBurstFactorCursor[k],
&locals->UrgentBurstFactorCursorPre[k],
&locals->UrgentBurstFactorLuma[k],
&locals->UrgentBurstFactorLumaPre[k],
&locals->UrgentBurstFactorChroma[k],
&locals->UrgentBurstFactorChromaPre[k],
&locals->NotEnoughUrgentLatencyHiding[0][0],
&locals->NotEnoughUrgentLatencyHidingPre);
if (mode_lib->vba.UseUrgentBurstBandwidth == false) {
locals->UrgentBurstFactorLuma[k] = 1;
locals->UrgentBurstFactorChroma[k] = 1;
locals->UrgentBurstFactorCursor[k] = 1;
locals->UrgentBurstFactorLumaPre[k] = 1;
locals->UrgentBurstFactorChromaPre[k] = 1;
locals->UrgentBurstFactorCursorPre[k] = 1;
}
MaxTotalRDBandwidth = MaxTotalRDBandwidth +
dml_max3(locals->prefetch_vmrow_bw[k],
locals->ReadBandwidthPlaneLuma[k] * locals->UrgentBurstFactorLuma[k]
+ locals->ReadBandwidthPlaneChroma[k] * locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k]
* locals->UrgentBurstFactorCursor[k] + locals->meta_row_bw[k] + locals->dpte_row_bw[k],
locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k] + locals->RequiredPrefetchPixDataBWChroma[k]
* locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst +
dml_max3(locals->prefetch_vmrow_bw[k],
locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k] + locals->cursor_bw[k]
+ locals->meta_row_bw[k] + locals->dpte_row_bw[k],
locals->RequiredPrefetchPixDataBWLuma[k] + locals->RequiredPrefetchPixDataBWChroma[k] + locals->cursor_bw_pre[k]);
if (locals->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (locals->VRatioPrefetchY[k] > 4 || locals->VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
}
mode_lib->vba.FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / mode_lib->vba.ReturnBW;
if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && locals->NotEnoughUrgentLatencyHiding[0][0] == 0 &&
locals->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4
&& !DestinationLineTimesForPrefetchLessThan2)
mode_lib->vba.PrefetchModeSupported = true;
else {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
}
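		// If the prefetch schedule fits, check immediate flip: the flip traffic
		// must fit in the return bandwidth left over after active and prefetch
		// reads, and every pipe must individually support it.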
if (mode_lib->vba.PrefetchModeSupported == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
- dml_max(
locals->ReadBandwidthPlaneLuma[k] * locals->UrgentBurstFactorLuma[k]
+ locals->ReadBandwidthPlaneChroma[k] * locals->UrgentBurstFactorChroma[k]
+ locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k] +
locals->RequiredPrefetchPixDataBWChroma[k] * locals->UrgentBurstFactorChromaPre[k] +
locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
}
mode_lib->vba.TotImmediateFlipBytes = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes + locals->PDEAndMetaPTEBytesFrame[k] + locals->MetaRowByte[k] + locals->PixelPTEBytesPerRow[k];
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
locals->Tno_bw[k],
mode_lib->vba.DCCEnable[k],
locals->dpte_row_height[k],
locals->meta_row_height[k],
locals->dpte_row_height_chroma[k],
locals->meta_row_height_chroma[k],
&locals->DestinationLinesToRequestVMInImmediateFlip[k],
&locals->DestinationLinesToRequestRowInImmediateFlip[k],
&locals->final_flip_bw[k],
&locals->ImmediateFlipSupportedForPipe[k]);
}
mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
mode_lib->vba.total_dcn_read_bw_with_flip =
mode_lib->vba.total_dcn_read_bw_with_flip + dml_max3(
locals->prefetch_vmrow_bw[k],
locals->final_flip_bw[k] + locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
locals->final_flip_bw[k] + locals->RequiredPrefetchPixDataBWLuma[k] * locals->UrgentBurstFactorLumaPre[k]
+ locals->RequiredPrefetchPixDataBWChroma[k] * locals->UrgentBurstFactorChromaPre[k]
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst =
mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst +
dml_max3(locals->prefetch_vmrow_bw[k],
locals->final_flip_bw[k] + locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k] + locals->cursor_bw[k],
locals->final_flip_bw[k] + locals->RequiredPrefetchPixDataBWLuma[k] + locals->RequiredPrefetchPixDataBWChroma[k] + locals->cursor_bw_pre[k]);
}
mode_lib->vba.FractionOfUrgentBandwidthImmediateFlip = mode_lib->vba.total_dcn_read_bw_with_flip_no_urgent_burst / mode_lib->vba.ReturnBW;
mode_lib->vba.ImmediateFlipSupported = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->vba.ImmediateFlipSupported = false;
}
}
} else {
mode_lib->vba.ImmediateFlipSupported = false;
}
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ErrorResult[k]) {
mode_lib->vba.PrefetchModeSupported = false;
dml_print(
"DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
}
}
mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
} while (!((mode_lib->vba.PrefetchModeSupported
&& ((!mode_lib->vba.ImmediateFlipSupport && !mode_lib->vba.HostVMEnable)
|| mode_lib->vba.ImmediateFlipSupported))
|| locals->MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
//Watermarks and NB P-State/DRAM Clock Change Support
{
enum clock_change_support DRAMClockChangeSupport; // dummy
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.MaxLineBufferLines,
mode_lib->vba.LineBufferSize,
mode_lib->vba.DPPOutputBufferPixels,
mode_lib->vba.DETBufferSizeInKByte[0],
mode_lib->vba.WritebackInterfaceLumaBufferSize,
mode_lib->vba.WritebackInterfaceChromaBufferSize,
mode_lib->vba.DCFCLK,
mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
mode_lib->vba.ReturnBW,
mode_lib->vba.GPUVMEnable,
locals->dpte_group_bytes,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.UrgentLatency,
mode_lib->vba.UrgentExtraLatency,
mode_lib->vba.WritebackLatency,
mode_lib->vba.WritebackChunkSize,
mode_lib->vba.SOCCLK,
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.SRExitTime,
mode_lib->vba.SREnterPlusExitTime,
mode_lib->vba.DCFCLKDeepSleep,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.DCCEnable,
locals->DPPCLK,
locals->SwathWidthSingleDPPY,
mode_lib->vba.SwathHeightY,
locals->ReadBandwidthPlaneLuma,
mode_lib->vba.SwathHeightC,
locals->ReadBandwidthPlaneChroma,
mode_lib->vba.LBBitPerPixel,
locals->SwathWidthY,
mode_lib->vba.HRatio,
mode_lib->vba.vtaps,
mode_lib->vba.VTAPsChroma,
mode_lib->vba.VRatio,
mode_lib->vba.HTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.BlendingAndTiming,
locals->BytePerPixelDETY,
locals->BytePerPixelDETC,
mode_lib->vba.WritebackEnable,
mode_lib->vba.WritebackPixelFormat,
mode_lib->vba.WritebackDestinationWidth,
mode_lib->vba.WritebackDestinationHeight,
mode_lib->vba.WritebackSourceHeight,
&DRAMClockChangeSupport,
&mode_lib->vba.UrgentWatermark,
&mode_lib->vba.WritebackUrgentWatermark,
&mode_lib->vba.DRAMClockChangeWatermark,
&mode_lib->vba.WritebackDRAMClockChangeWatermark,
&mode_lib->vba.StutterExitWatermark,
&mode_lib->vba.StutterEnterPlusExitWatermark,
&mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
}
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.VRatio,
locals->VRatioPrefetchY,
locals->VRatioPrefetchC,
locals->swath_width_luma_ub,
locals->swath_width_chroma_ub,
mode_lib->vba.DPPPerPlane,
mode_lib->vba.HRatio,
mode_lib->vba.PixelClock,
locals->PSCL_THROUGHPUT_LUMA,
locals->PSCL_THROUGHPUT_CHROMA,
locals->DPPCLK,
locals->BytePerPixelDETC,
mode_lib->vba.SourceScan,
locals->BlockWidth256BytesY,
locals->BlockHeight256BytesY,
locals->BlockWidth256BytesC,
locals->BlockHeight256BytesC,
locals->DisplayPipeLineDeliveryTimeLuma,
locals->DisplayPipeLineDeliveryTimeChroma,
locals->DisplayPipeLineDeliveryTimeLumaPrefetch,
locals->DisplayPipeLineDeliveryTimeChromaPrefetch,
locals->DisplayPipeRequestDeliveryTimeLuma,
locals->DisplayPipeRequestDeliveryTimeChroma,
locals->DisplayPipeRequestDeliveryTimeLumaPrefetch,
locals->DisplayPipeRequestDeliveryTimeChromaPrefetch);
CalculateMetaAndPTETimes(
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.MinMetaChunkSizeBytes,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HTotal,
mode_lib->vba.VRatio,
locals->VRatioPrefetchY,
locals->VRatioPrefetchC,
locals->DestinationLinesToRequestRowInVBlank,
locals->DestinationLinesToRequestRowInImmediateFlip,
locals->DestinationLinesToRequestVMInVBlank,
locals->DestinationLinesToRequestVMInImmediateFlip,
mode_lib->vba.DCCEnable,
mode_lib->vba.PixelClock,
locals->BytePerPixelDETY,
locals->BytePerPixelDETC,
mode_lib->vba.SourceScan,
locals->dpte_row_height,
locals->dpte_row_height_chroma,
locals->meta_row_width,
locals->meta_row_height,
locals->meta_req_width,
locals->meta_req_height,
locals->dpte_group_bytes,
locals->PTERequestSizeY,
locals->PTERequestSizeC,
locals->PixelPTEReqWidthY,
locals->PixelPTEReqHeightY,
locals->PixelPTEReqWidthC,
locals->PixelPTEReqHeightC,
locals->dpte_row_width_luma_ub,
locals->dpte_row_width_chroma_ub,
locals->vm_group_bytes,
locals->dpde0_bytes_per_frame_ub_l,
locals->dpde0_bytes_per_frame_ub_c,
locals->meta_pte_bytes_per_frame_ub_l,
locals->meta_pte_bytes_per_frame_ub_c,
locals->DST_Y_PER_PTE_ROW_NOM_L,
locals->DST_Y_PER_PTE_ROW_NOM_C,
locals->DST_Y_PER_META_ROW_NOM_L,
locals->TimePerMetaChunkNominal,
locals->TimePerMetaChunkVBlank,
locals->TimePerMetaChunkFlip,
locals->time_per_pte_group_nom_luma,
locals->time_per_pte_group_vblank_luma,
locals->time_per_pte_group_flip_luma,
locals->time_per_pte_group_nom_chroma,
locals->time_per_pte_group_vblank_chroma,
locals->time_per_pte_group_flip_chroma,
locals->TimePerVMGroupVBlank,
locals->TimePerVMGroupFlip,
locals->TimePerVMRequestVBlank,
locals->TimePerVMRequestFlip);
// Min TTUVBlank
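	/*
	 * MinTTUVBlank depends on the prefetch mode: mode 0 keeps both DRAM
	 * clock change and self refresh allowed during vblank and must cover
	 * the largest of the three watermarks; mode 1 only allows self
	 * refresh; any other mode only needs the urgent watermark. TCalc is
	 * added on top when dynamic metadata is not enabled for the plane.
	 */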
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
locals->AllowDRAMClockChangeDuringVBlank[k] = true;
locals->AllowDRAMSelfRefreshDuringVBlank[k] = true;
locals->MinTTUVBlank[k] = dml_max(
mode_lib->vba.DRAMClockChangeWatermark,
dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark));
} else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) {
locals->AllowDRAMClockChangeDuringVBlank[k] = false;
locals->AllowDRAMSelfRefreshDuringVBlank[k] = true;
locals->MinTTUVBlank[k] = dml_max(
mode_lib->vba.StutterEnterPlusExitWatermark,
mode_lib->vba.UrgentWatermark);
} else {
locals->AllowDRAMClockChangeDuringVBlank[k] = false;
locals->AllowDRAMSelfRefreshDuringVBlank[k] = false;
locals->MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
}
if (!mode_lib->vba.DynamicMetadataEnable[k])
locals->MinTTUVBlank[k] = mode_lib->vba.TCalc
+ locals->MinTTUVBlank[k];
}
// DCC Configuration
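	/*
	 * CalculateDCCConfiguration() reports, per plane, the maximum
	 * uncompressed/compressed DCC block sizes and whether independent
	 * 64-byte blocks are used, based on viewport size, tiling, swath
	 * height and scan direction.
	 */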
mode_lib->vba.ActiveDPPs = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
locals->MaximumDCCCompressionYSurface[k] = CalculateDCCConfiguration(
mode_lib->vba.DCCEnable[k],
			false, // DCCProgrammingAssumesScanDirectionUnknown: the scan direction is always known here
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
mode_lib->vba.DETBufferSizeInKByte[0] * 1024,
locals->BlockHeight256BytesY[k],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SurfaceTiling[k],
locals->BytePerPixelDETY[k],
mode_lib->vba.SourceScan[k],
&locals->DCCYMaxUncompressedBlock[k],
&locals->DCCYMaxCompressedBlock[k],
&locals->DCCYIndependent64ByteBlock[k]);
}
//XFC Parameters:
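	/*
	 * For XFC-enabled planes: compute the remote surface flip delay,
	 * convert it and the bus transport / x-buffer fill times into
	 * precharge and transfer delays in units of lines, then derive the
	 * prefetch margin from the remaining x-buffer fill level. Planes
	 * without XFC get all XFC parameters cleared to zero.
	 */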
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.XFCEnabled[k] == true) {
double TWait;
locals->XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
locals->XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
locals->XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
locals->SwathWidthY[k],
dml_ceil(locals->BytePerPixelDETY[k], 1),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TCalc,
TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
locals->XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
locals->XFCTransferDelay[k] =
dml_ceil(
mode_lib->vba.XFCBusTransportTime
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
locals->XFCPrechargeDelay[k] =
dml_ceil(
(mode_lib->vba.XFCBusTransportTime
+ mode_lib->vba.TInitXFill
+ mode_lib->vba.TslvChk)
/ (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]),
1);
mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
* mode_lib->vba.SrcActiveDrainRate;
mode_lib->vba.FinalFillMargin =
(locals->DestinationLinesToRequestVMInVBlank[k]
+ locals->DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.XFCFillConstant;
mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
* mode_lib->vba.SrcActiveDrainRate
+ mode_lib->vba.FinalFillMargin;
mode_lib->vba.RemainingFillLevel = dml_max(
0.0,
mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
/ (mode_lib->vba.SrcActiveDrainRate
* mode_lib->vba.XFCFillBWOverhead / 100);
locals->XFCPrefetchMargin[k] =
mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ mode_lib->vba.TFinalxFill
+ (locals->DestinationLinesToRequestVMInVBlank[k]
+ locals->DestinationLinesToRequestRowInVBlank[k])
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
locals->XFCSlaveVUpdateOffset[k] = 0;
locals->XFCSlaveVupdateWidth[k] = 0;
locals->XFCSlaveVReadyOffset[k] = 0;
locals->XFCRemoteSurfaceFlipLatency[k] = 0;
locals->XFCPrechargeDelay[k] = 0;
locals->XFCTransferDelay[k] = 0;
locals->XFCPrefetchMargin[k] = 0;
}
}
// Stutter Efficiency
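	/*
	 * Stutter efficiency: the stutter period is the smallest full-DET
	 * buffering time across planes. The burst time is the ROB-resident
	 * part of the burst (returned at ReturnBW after DCC decompression)
	 * plus the remainder drained at DCFCLK * 64 bytes/clk, plus the row
	 * fetch traffic. Efficiency not including vblank is roughly
	 * 1 - (SRExitTime + StutterBurstTime) / StutterPeriod, and is then
	 * scaled by the portion of the frame outside the smallest vblank.
	 */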
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
CalculateDETBufferSize(
mode_lib->vba.DETBufferSizeInKByte[0],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
&locals->DETBufferSizeY[k],
&locals->DETBufferSizeC[k]);
locals->LinesInDETY[k] = (double)locals->DETBufferSizeY[k]
/ locals->BytePerPixelDETY[k] / locals->SwathWidthY[k];
locals->LinesInDETYRoundedDownToSwath[k] = dml_floor(
locals->LinesInDETY[k],
mode_lib->vba.SwathHeightY[k]);
locals->FullDETBufferingTimeY[k] =
locals->LinesInDETYRoundedDownToSwath[k]
* (mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k])
/ mode_lib->vba.VRatio[k];
}
mode_lib->vba.StutterPeriod = 999999.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->FullDETBufferingTimeY[k] < mode_lib->vba.StutterPeriod) {
mode_lib->vba.StutterPeriod = locals->FullDETBufferingTimeY[k];
mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
(double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
locals->BytePerPixelYCriticalPlane = dml_ceil(locals->BytePerPixelDETY[k], 1);
locals->SwathWidthYCriticalPlane = locals->SwathWidthY[k];
locals->LinesToFinishSwathTransferStutterCriticalPlane =
mode_lib->vba.SwathHeightY[k] - (locals->LinesInDETY[k] - locals->LinesInDETYRoundedDownToSwath[k]);
}
}
mode_lib->vba.AverageReadBandwidth = 0.0;
mode_lib->vba.TotalRowReadBandwidth = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
unsigned int DCCRateLimit;
if (mode_lib->vba.DCCEnable[k]) {
if (locals->DCCYMaxCompressedBlock[k] == 256)
DCCRateLimit = 4;
else
DCCRateLimit = 2;
mode_lib->vba.AverageReadBandwidth =
mode_lib->vba.AverageReadBandwidth
+ (locals->ReadBandwidthPlaneLuma[k] + locals->ReadBandwidthPlaneChroma[k]) /
dml_min(mode_lib->vba.DCCRate[k], DCCRateLimit);
} else {
mode_lib->vba.AverageReadBandwidth =
mode_lib->vba.AverageReadBandwidth
+ locals->ReadBandwidthPlaneLuma[k]
+ locals->ReadBandwidthPlaneChroma[k];
}
mode_lib->vba.TotalRowReadBandwidth = mode_lib->vba.TotalRowReadBandwidth +
locals->meta_row_bw[k] + locals->dpte_row_bw[k];
}
mode_lib->vba.AverageDCCCompressionRate = mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.AverageReadBandwidth;
mode_lib->vba.PartOfBurstThatFitsInROB =
dml_min(
mode_lib->vba.StutterPeriod
* mode_lib->vba.TotalDataReadBandwidth,
mode_lib->vba.ROBBufferSizeInKByte * 1024
* mode_lib->vba.AverageDCCCompressionRate);
mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
/ mode_lib->vba.AverageDCCCompressionRate / mode_lib->vba.ReturnBW
+ (mode_lib->vba.StutterPeriod * mode_lib->vba.TotalDataReadBandwidth
- mode_lib->vba.PartOfBurstThatFitsInROB)
/ (mode_lib->vba.DCFCLK * 64)
+ mode_lib->vba.StutterPeriod * mode_lib->vba.TotalRowReadBandwidth / mode_lib->vba.ReturnBW;
mode_lib->vba.StutterBurstTime = dml_max(
mode_lib->vba.StutterBurstTime,
(locals->LinesToFinishSwathTransferStutterCriticalPlane * locals->BytePerPixelYCriticalPlane *
locals->SwathWidthYCriticalPlane / mode_lib->vba.ReturnBW)
);
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
}
}
if (mode_lib->vba.TotalActiveWriteback == 0) {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
- (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
/ mode_lib->vba.StutterPeriod) * 100;
} else {
mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
}
mode_lib->vba.SmallestVBlank = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k];
} else {
mode_lib->vba.VBlankTime = 0;
}
mode_lib->vba.SmallestVBlank = dml_min(
mode_lib->vba.SmallestVBlank,
mode_lib->vba.VBlankTime);
}
mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
* (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
- mode_lib->vba.SmallestVBlank)
+ mode_lib->vba.SmallestVBlank)
/ mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
}
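/*
 * DisplayPipeConfiguration: choose the swath height (maximum vs. minimum)
 * per plane so that one rounded-up luma swath plus one chroma swath fits
 * in half of the DET buffer, taking pixel format, tiling, scan direction,
 * ODM combine and the DPP split into account, then divide the DET between
 * luma and chroma accordingly.
 */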
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
// Display Pipe Configuration
double BytePerPixDETY;
double BytePerPixDETC;
double Read256BytesBlockHeightY;
double Read256BytesBlockHeightC;
double Read256BytesBlockWidthY;
double Read256BytesBlockWidthC;
double MaximumSwathHeightY;
double MaximumSwathHeightC;
double MinimumSwathHeightY;
double MinimumSwathHeightC;
double SwathWidth;
double SwathWidthGranularityY;
double SwathWidthGranularityC;
double RoundedUpMaxSwathSizeBytesY;
double RoundedUpMaxSwathSizeBytesC;
unsigned int j, k;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
bool MainPlaneDoesODMCombine = false;
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
BytePerPixDETY = 8;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
BytePerPixDETY = 4;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
BytePerPixDETY = 2;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
BytePerPixDETY = 1;
BytePerPixDETC = 2;
} else {
BytePerPixDETY = 4.0 / 3.0;
BytePerPixDETC = 8.0 / 3.0;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
Read256BytesBlockHeightY = 4;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
Read256BytesBlockHeightY = 8;
} else {
Read256BytesBlockHeightY = 16;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockHeightC = 0;
Read256BytesBlockWidthC = 0;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
Read256BytesBlockHeightY = 1;
Read256BytesBlockHeightC = 1;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
Read256BytesBlockHeightY = 16;
Read256BytesBlockHeightC = 8;
} else {
Read256BytesBlockHeightY = 8;
Read256BytesBlockHeightC = 8;
}
Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
/ Read256BytesBlockHeightY;
Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
/ Read256BytesBlockHeightC;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
MaximumSwathHeightY = Read256BytesBlockHeightY;
MaximumSwathHeightC = Read256BytesBlockHeightC;
} else {
MaximumSwathHeightY = Read256BytesBlockWidthY;
MaximumSwathHeightC = Read256BytesBlockWidthC;
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
&& mode_lib->vba.SourceScan[k] != dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
}
MinimumSwathHeightC = MaximumSwathHeightC;
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
MinimumSwathHeightC = MaximumSwathHeightC;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
MinimumSwathHeightY = MaximumSwathHeightY;
} else {
MinimumSwathHeightY = MaximumSwathHeightY;
MinimumSwathHeightC = MaximumSwathHeightC;
}
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
SwathWidth = mode_lib->vba.ViewportWidth[k];
} else {
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
if (MainPlaneDoesODMCombine == true) {
SwathWidth = dml_min(
SwathWidth,
mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
} else {
SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
}
SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
RoundedUpMaxSwathSizeBytesY = (dml_ceil(
(double) (SwathWidth - 1),
SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
* MaximumSwathHeightY;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ 256;
}
if (MaximumSwathHeightC > 0) {
SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
/ MaximumSwathHeightC;
RoundedUpMaxSwathSizeBytesC = (dml_ceil(
(double) (SwathWidth / 2.0 - 1),
SwathWidthGranularityC) + SwathWidthGranularityC)
* BytePerPixDETC * MaximumSwathHeightC;
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(
RoundedUpMaxSwathSizeBytesC,
256) + 256;
}
} else
RoundedUpMaxSwathSizeBytesC = 0.0;
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
<= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) {
mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
} else {
mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
}
CalculateDETBufferSize(
mode_lib->vba.DETBufferSizeInKByte[0],
mode_lib->vba.SwathHeightY[k],
mode_lib->vba.SwathHeightC[k],
&mode_lib->vba.DETBufferSizeY[k],
&mode_lib->vba.DETBufferSizeC[k]);
}
}
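/*
 * CalculateTWait: worst-case time the prefetch may have to wait before
 * data returns. Prefetch mode 0 must also cover a DRAM clock change and
 * self-refresh exit, mode 1 only self-refresh exit, any other mode just
 * the urgent latency.
 */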
static double CalculateTWait(
unsigned int PrefetchMode,
double DRAMClockChangeLatency,
double UrgentLatency,
double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(
DRAMClockChangeLatency + UrgentLatency,
dml_max(SREnterPlusExitTime, UrgentLatency));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatency);
} else {
return UrgentLatency;
}
}
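/*
 * CalculateRemoteSurfaceFlipDelay: XFC remote flip latency. The source
 * active drain rate is VRatio * SwathWidth * Bpp / LineTime; the result
 * is two bus transport times plus the slave vupdate/vready setup, TCalc,
 * TWait, the slave chunk check time and the initial x-buffer fill time.
 */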
static double CalculateRemoteSurfaceFlipDelay(
struct display_mode_lib *mode_lib,
double VRatio,
double SwathWidth,
double Bpp,
double LineTime,
double XFCTSlvVupdateOffset,
double XFCTSlvVupdateWidth,
double XFCTSlvVreadyOffset,
double XFCXBUFLatencyTolerance,
double XFCFillBWOverhead,
double XFCSlvChunkSize,
double XFCBusTransportTime,
double TCalc,
double TWait,
double *SrcActiveDrainRate,
double *TInitXFill,
double *TslvChk)
{
double TSlvSetup, AvgfillRate, result;
*SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
*TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
*TslvChk = XFCSlvChunkSize / AvgfillRate;
dml_print(
"DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
*SrcActiveDrainRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
return result;
}
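/*
 * CalculateWriteBackDelay: writeback pipeline delay driven by the luma
 * taps and scaling ratios; for formats other than dm_444_32 the chroma
 * taps/ratios are evaluated as well and the larger delay wins.
 */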
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackLumaHTaps,
unsigned int WritebackLumaVTaps,
unsigned int WritebackChromaHTaps,
unsigned int WritebackChromaVTaps,
unsigned int WritebackDestinationWidth)
{
	double CalculateWriteBackDelay =
			dml_max(
					dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
					WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
							* dml_ceil(WritebackDestinationWidth / 4.0, 1)
							+ dml_ceil(1.0 / WritebackVRatio, 1)
							* (dml_ceil(WritebackLumaVTaps / 4.0, 1) + 4));
	if (WritebackPixelFormat != dm_444_32) {
		CalculateWriteBackDelay =
				dml_max(
						CalculateWriteBackDelay,
						dml_max(
								dml_ceil(WritebackChromaHTaps / 2.0, 1)
										/ (2 * WritebackHRatio),
								WritebackChromaVTaps
										* dml_ceil(1 / (2 * WritebackVRatio), 1)
										* dml_ceil(WritebackDestinationWidth / 2.0 / 2.0, 1)
										+ dml_ceil(1 / (2 * WritebackVRatio), 1)
										* (dml_ceil(WritebackChromaVTaps / 4.0, 1) + 4)));
	}
return CalculateWriteBackDelay;
}
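/*
 * CalculateActiveRowBandwidth: average bandwidth needed to keep the DCC
 * meta rows and the page-table (dpte) rows fetched during active time.
 * 4:2:0 formats add a chroma term at half the vertical ratio; the meta
 * and dpte terms are zero when DCC or GPUVM is disabled, respectively.
 */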
static void CalculateActiveRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ VRatio / 2 * MetaRowByteChroma
/ (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatio / 2 * PixelPTEBytesPerRowChroma
/ (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
}
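/*
 * CalculateFlipSchedule: split this pipe's share of the immediate-flip
 * bandwidth between the PDE/meta-PTE fetch and the meta/dpte row fetch,
 * quantize the resulting fetch times to quarter lines
 * (dml_ceil(4 * t / LineTime, 1) / 4), derive the final flip bandwidth,
 * and reject the flip if either fetch needs too many destination lines
 * or the two fetches do not fit within the minimum row time.
 */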
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxPageTableLevels,
unsigned int HostVMCachedPageTableLevels,
bool GPUVMEnable,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
{
double min_row_time = 0.0;
unsigned int HostVMDynamicLevels;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW = 1.0;
double HostVMInefficiencyFactor;
double VRatioClamped;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor =
PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
/ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevels = HostVMMaxPageTableLevels - HostVMCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevels = 0;
}
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow)
* BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
if (GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevels + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3((MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / ImmediateFlipBW, UrgentLatency * (HostVMDynamicLevels + 1), LineTime / 4);
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(
dpte_row_height * LineTime / VRatioClamped,
dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(
meta_row_height * LineTime / VRatioClamped,
meta_row_height_chroma * LineTime / (VRatioClamped / 2));
} else {
min_row_time = dml_min4(
dpte_row_height * LineTime / VRatioClamped,
meta_row_height * LineTime / VRatioClamped,
dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
meta_row_height_chroma * LineTime / (VRatioClamped / 2));
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatioClamped;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatioClamped;
} else {
min_row_time = dml_min(
dpte_row_height * LineTime / VRatioClamped,
meta_row_height * LineTime / VRatioClamped);
}
}
if (*DestinationLinesToRequestVMInImmediateFlip >= 32
|| *DestinationLinesToRequestRowInImmediateFlip >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
*ImmediateFlipSupportedForPipe = false;
} else {
*ImmediateFlipSupportedForPipe = true;
}
}
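/*
 * TruncToValidBPP: clamp the available link bpp to a value the output
 * encoder/format can actually use. HDMI and non-DSC DP only allow fixed
 * per-format bpp steps; with DSC the bpp is truncated to 1/16 bpp
 * granularity between the format-dependent minimum and the cap of
 * 1.5/2/3 * DSCInputBitPerComponent - 1/16 (e.g. 8 bpc 4:4:4 caps at
 * 3 * 8 - 1/16 = 23.9375 bpp). A non-zero DesiredBPP must be achievable
 * exactly, otherwise BPP_INVALID is returned.
 */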
static unsigned int TruncToValidBPP(
double DecimalBPP,
double DesiredBPP,
bool DSCEnabled,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent)
{
if (Output == dm_hdmi) {
if (Format == dm_420) {
if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_444) {
if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
} else {
if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
}
} else {
if (DSCEnabled) {
if (Format == dm_420) {
if (DesiredBPP == 0) {
if (DecimalBPP < 6)
return BPP_INVALID;
else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 6
|| DesiredBPP < 6
|| DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
} else if (Format == dm_n422) {
if (DesiredBPP == 0) {
if (DecimalBPP < 7)
return BPP_INVALID;
else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 7
|| DesiredBPP < 7
|| DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
} else {
if (DesiredBPP == 0) {
if (DecimalBPP < 8)
return BPP_INVALID;
else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
else
return dml_floor(16 * DecimalBPP, 1) / 16.0;
} else {
if (DecimalBPP < 8
|| DesiredBPP < 8
|| DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
|| DecimalBPP < DesiredBPP) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
}
} else if (Format == dm_420) {
if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_s422 || Format == dm_n422) {
if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
} else {
if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
}
}
}
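/*
 * CalculatePrefetchSchedulePerPlane: mode-support helper that builds the
 * Pipe and HostVM descriptors for plane k at state i, combination j,
 * works out the XFC remote flip delay when XFC is enabled, and runs
 * CalculatePrefetchSchedule to fill in the per-state prefetch bandwidths
 * and VRatio-prefetch values, recording a failure in IsErrorResult.
 */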
static noinline void CalculatePrefetchSchedulePerPlane(
struct display_mode_lib *mode_lib,
int i,
unsigned j,
unsigned k)
{
struct vba_vars_st *locals = &mode_lib->vba;
Pipe myPipe;
HostVM myHostVM;
if (mode_lib->vba.XFCEnabled[k] == true) {
mode_lib->vba.XFCRemoteSurfaceFlipDelay =
CalculateRemoteSurfaceFlipDelay(
mode_lib,
mode_lib->vba.VRatio[k],
locals->SwathWidthYThisState[k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.XFCTSlvVupdateOffset,
mode_lib->vba.XFCTSlvVupdateWidth,
mode_lib->vba.XFCTSlvVreadyOffset,
mode_lib->vba.XFCXBUFLatencyTolerance,
mode_lib->vba.XFCFillBWOverhead,
mode_lib->vba.XFCSlvChunkSize,
mode_lib->vba.XFCBusTransportTime,
mode_lib->vba.TimeCalc,
mode_lib->vba.TWait,
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
} else {
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
}
myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
myPipe.PixelClock = mode_lib->vba.PixelClock[k];
myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
myPipe.SourceScan = mode_lib->vba.SourceScan[k];
myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
myPipe.HTotal = mode_lib->vba.HTotal[k];
myHostVM.Enable = mode_lib->vba.HostVMEnable;
myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
locals->DSCDelayPerState[i][k],
mode_lib->vba.DPPCLKDelaySubtotal,
mode_lib->vba.DPPCLKDelaySCL,
mode_lib->vba.DPPCLKDelaySCLLBOnly,
mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelayCNVCCursor,
mode_lib->vba.DISPCLKDelaySubtotal,
locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
locals->MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
&myHostVM,
mode_lib->vba.DynamicMetadataEnable[k],
mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
mode_lib->vba.DynamicMetadataTransmittedBytes[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.UrgentLatency,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
locals->MetaRowBytes[0][0][k],
locals->DPTEBytesPerRow[0][0][k],
locals->PrefetchLinesY[0][0][k],
locals->SwathWidthYThisState[k],
locals->BytePerPixelInDETY[k],
locals->PrefillY[k],
locals->MaxNumSwY[k],
locals->PrefetchLinesC[0][0][k],
locals->BytePerPixelInDETC[k],
locals->PrefillC[k],
locals->MaxNumSwC[k],
locals->SwathHeightYThisState[k],
locals->SwathHeightCThisState[k],
mode_lib->vba.TWait,
mode_lib->vba.XFCEnabled[k],
mode_lib->vba.XFCRemoteSurfaceFlipDelay,
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
&locals->dst_x_after_scaler,
&locals->dst_y_after_scaler,
&locals->LineTimesForPrefetch[k],
&locals->PrefetchBW[k],
&locals->LinesForMetaPTE[k],
&locals->LinesForMetaAndDPTERow[k],
&locals->VRatioPreY[i][j][k],
&locals->VRatioPreC[i][j][k],
&locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
&locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
&locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
&locals->Tno_bw[k],
&locals->prefetch_vmrow_bw[k],
locals->swath_width_luma_ub,
locals->swath_width_chroma_ub,
&mode_lib->vba.VUpdateOffsetPix[k],
&mode_lib->vba.VUpdateWidthPix[k],
&mode_lib->vba.VReadyOffsetPix[k]);
}
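/*
 * Mode support, voltage state and SoC configuration evaluation: walk
 * every plane and every power state, checking scaling/taps, source
 * format, bandwidth, writeback, re-ordering buffer, DISPCLK/DPPCLK and
 * related limits to decide which states (and DPP/ODM splits) can support
 * the requested configuration.
 */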
void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
int i;
unsigned int j, k, m;
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
/*Scale Ratio, taps Support Check*/
mode_lib->vba.ScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.ScalerEnabled[k] == false
&& ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
|| mode_lib->vba.HRatio[k] != 1.0
|| mode_lib->vba.htaps[k] != 1.0
|| mode_lib->vba.VRatio[k] != 1.0
|| mode_lib->vba.vtaps[k] != 1.0)) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
} else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
|| mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
|| (mode_lib->vba.htaps[k] > 1.0
&& (mode_lib->vba.htaps[k] % 2) == 1)
|| mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
|| mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
|| mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
|| mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
|| (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
&& (mode_lib->vba.HRatio[k] / 2.0
> mode_lib->vba.HTAPsChroma[k]
|| mode_lib->vba.VRatio[k] / 2.0
> mode_lib->vba.VTAPsChroma[k]))) {
mode_lib->vba.ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
mode_lib->vba.SourceFormatPixelAndScanSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
&& mode_lib->vba.SourceScan[k] != dm_horz)
|| ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
|| mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
|| (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
&& (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
== dm_444_32)
&& mode_lib->vba.SourceScan[k]
== dm_horz
&& mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
== true
&& mode_lib->vba.DCCEnable[k]
== false))
|| (mode_lib->vba.DCCEnable[k] == true
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_linear
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_8
|| mode_lib->vba.SourcePixelFormat[k]
== dm_420_10)))) {
mode_lib->vba.SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
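	/*
	 * Per-plane DET bytes-per-pixel and single-DPP read bandwidth: luma
	 * bandwidth is SwathWidth * ceil(BytePerPixelY) / LineTime * VRatio;
	 * chroma uses half the swath width and half the vertical ratio.
	 */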
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
locals->BytePerPixelInDETY[k] = 8.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
locals->BytePerPixelInDETY[k] = 4.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
locals->BytePerPixelInDETY[k] = 2.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 0.0;
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
locals->BytePerPixelInDETY[k] = 1.0;
locals->BytePerPixelInDETC[k] = 2.0;
} else {
locals->BytePerPixelInDETY[k] = 4.0 / 3;
locals->BytePerPixelInDETC[k] = 8.0 / 3;
}
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
} else {
locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0)
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0;
locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 4.0;
} else if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 3.0;
} else if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
* mode_lib->vba.WritebackDestinationHeight[k]
/ (mode_lib->vba.WritebackSourceHeight[k]
* mode_lib->vba.HTotal[k]
/ mode_lib->vba.PixelClock[k]) * 1.5;
} else {
locals->WriteBandwidth[k] = 0.0;
}
}
mode_lib->vba.DCCEnabledInAnyPlane = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.DCCEnable[k] == true) {
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn);
if (mode_lib->vba.HostVMEnable == false) {
locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;
} else {
locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;
}
}
/*Writeback Latency support check*/
mode_lib->vba.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
if (locals->WriteBandwidth[k]
> (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
} else {
if (locals->WriteBandwidth[k]
> 1.5
* dml_min(
mode_lib->vba.WritebackInterfaceLumaBufferSize,
2.0
* mode_lib->vba.WritebackInterfaceChromaBufferSize)
/ mode_lib->vba.WritebackLatency) {
mode_lib->vba.WritebackLatencySupport = false;
}
}
}
}
/*Re-ordering Buffer Support Check*/
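	/*
	 * The re-ordering buffer can hide the urgent round trip plus
	 * out-of-order return latency only if (ROB size - one pixel chunk)
	 * takes longer to drain at the state's return bandwidth than that
	 * latency.
	 */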
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
+ dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly)
* mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
locals->ROBSupport[i][0] = true;
} else {
locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
mode_lib->vba.TotalNumberOfActiveWriteback = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0)
mode_lib->vba.ActiveWritebacksPerPlane[k] = 1;
mode_lib->vba.TotalNumberOfActiveWriteback =
mode_lib->vba.TotalNumberOfActiveWriteback
+ mode_lib->vba.ActiveWritebacksPerPlane[k];
}
}
mode_lib->vba.WritebackModeSupport = true;
if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
mode_lib->vba.WritebackModeSupport = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true
&& mode_lib->vba.Writeback10bpc420Supported != true
&& mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
mode_lib->vba.WritebackModeSupport = false;
}
}
/*Writeback Scale Ratio and Taps Support Check*/
mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
&& (mode_lib->vba.WritebackHRatio[k] != 1.0
|| mode_lib->vba.WritebackVRatio[k] != 1.0)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackMaxVSCLRatio
|| mode_lib->vba.WritebackHRatio[k]
< mode_lib->vba.WritebackMinHSCLRatio
|| mode_lib->vba.WritebackVRatio[k]
< mode_lib->vba.WritebackMinVSCLRatio
|| mode_lib->vba.WritebackLumaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackLumaHTaps[k]
|| mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackLumaVTaps[k]
|| (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
== 1))
|| (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
&& (mode_lib->vba.WritebackChromaHTaps[k]
> mode_lib->vba.WritebackMaxHSCLTaps
|| mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackMaxVSCLTaps
|| 2.0
* mode_lib->vba.WritebackHRatio[k]
> mode_lib->vba.WritebackChromaHTaps[k]
|| 2.0
* mode_lib->vba.WritebackVRatio[k]
> mode_lib->vba.WritebackChromaVTaps[k]
|| (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
&& ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
mode_lib->vba.WritebackLumaVExtra =
dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
} else {
mode_lib->vba.WritebackLumaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
&& mode_lib->vba.WritebackLumaVTaps[k]
> (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ mode_lib->vba.WritebackLineBufferChromaBufferSize)
/ 3.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackLumaVTaps[k]
> mode_lib->vba.WritebackLineBufferLumaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackLumaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
mode_lib->vba.WritebackChromaVExtra = 0.0;
} else {
mode_lib->vba.WritebackChromaVExtra = -1;
}
if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)
|| (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
&& mode_lib->vba.WritebackChromaVTaps[k]
> mode_lib->vba.WritebackLineBufferChromaBufferSize
* 8.0 / 10.0
/ mode_lib->vba.WritebackDestinationWidth[k]
- mode_lib->vba.WritebackChromaVExtra)) {
mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
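	/*
	 * For each state and MPC-combine option: start from the DISPCLK
	 * required by active writebacks, compute the per-plane DISPCLK (with
	 * down-spread and ramping margins), decide whether ODM combine is
	 * needed (DISPCLK limit, DSC image width or 4:2:0 image width), and
	 * split a plane across two DPPs when a single DPP cannot meet the
	 * DPPCLK or swath width limits.
	 */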
mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.WritebackEnable[k] == true) {
mode_lib->vba.WritebackRequiredDISPCLK =
dml_max(
mode_lib->vba.WritebackRequiredDISPCLK,
CalculateWriteBackDISPCLK(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.PixelClock[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.WritebackChromaLineBufferWidth));
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.HRatio[k] > 1.0) {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ dml_ceil(
mode_lib->vba.htaps[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
if (locals->BytePerPixelInDETC[k] == 0.0) {
locals->PSCL_FACTOR_CHROMA[k] = 0.0;
locals->MinDPPCLKUsingSingleDPP[k] =
mode_lib->vba.PixelClock[k]
* dml_max3(
mode_lib->vba.vtaps[k] / 6.0
* dml_min(
1.0,
mode_lib->vba.HRatio[k]),
mode_lib->vba.HRatio[k]
* mode_lib->vba.VRatio[k]
/ locals->PSCL_FACTOR[k],
1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
} else {
if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
locals->PSCL_FACTOR_CHROMA[k] =
dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput
* mode_lib->vba.HRatio[k]
/ 2.0
/ dml_ceil(
mode_lib->vba.HTAPsChroma[k]
/ 6.0,
1.0));
} else {
locals->PSCL_FACTOR_CHROMA[k] = dml_min(
mode_lib->vba.MaxDCHUBToPSCLThroughput,
mode_lib->vba.MaxPSCLToLBThroughput);
}
			locals->MinDPPCLKUsingSingleDPP[k] = mode_lib->vba.PixelClock[k]
					* dml_max5(
							mode_lib->vba.vtaps[k] / 6.0 * dml_min(1.0, mode_lib->vba.HRatio[k]),
							mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / locals->PSCL_FACTOR[k],
							mode_lib->vba.VTAPsChroma[k] / 6.0 * dml_min(1.0, mode_lib->vba.HRatio[k] / 2.0),
							mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / 4.0 / locals->PSCL_FACTOR_CHROMA[k],
							1.0);
if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
|| mode_lib->vba.HTAPsChroma[k] > 6.0
|| mode_lib->vba.VTAPsChroma[k] > 6.0)
&& locals->MinDPPCLKUsingSingleDPP[k]
< 2.0 * mode_lib->vba.PixelClock[k]) {
locals->MinDPPCLKUsingSingleDPP[k] = 2.0
* mode_lib->vba.PixelClock[k];
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
Calculate256BBlockSizes(
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
&locals->Read256BlockHeightY[k],
&locals->Read256BlockHeightC[k],
&locals->Read256BlockWidthY[k],
&locals->Read256BlockWidthC[k]);
if (mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k];
} else {
locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k];
locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k];
}
if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_32
|| mode_lib->vba.SourcePixelFormat[k] == dm_444_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
|| mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
|| (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
&& (mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_4kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_t
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_64kb_s_x
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s
|| mode_lib->vba.SurfaceTiling[k]
== dm_sw_var_s_x)
&& mode_lib->vba.SourceScan[k] == dm_horz)) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
}
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else {
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]
/ 2.0;
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
} else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
&& mode_lib->vba.SourceScan[k] == dm_horz) {
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]
/ 2.0;
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
} else {
locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k];
locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k];
}
}
if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
} else {
mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
}
mode_lib->vba.MaximumSwathWidthInDETBuffer =
dml_min(
mode_lib->vba.MaximumSwathWidthSupport,
mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0
/ (locals->BytePerPixelInDETY[k]
* locals->MinSwathHeightY[k]
+ locals->BytePerPixelInDETC[k]
/ 2.0
* locals->MinSwathHeightC[k]));
		if (locals->BytePerPixelInDETC[k] == 0.0) {
			mode_lib->vba.MaximumSwathWidthInLineBuffer =
					mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k], 1.0)
							/ mode_lib->vba.LBBitPerPixel[k]
							/ (mode_lib->vba.vtaps[k]
									+ dml_max(dml_ceil(mode_lib->vba.VRatio[k], 1.0) - 2, 0.0));
		} else {
			mode_lib->vba.MaximumSwathWidthInLineBuffer = dml_min(
					mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k], 1.0)
							/ mode_lib->vba.LBBitPerPixel[k]
							/ (mode_lib->vba.vtaps[k]
									+ dml_max(dml_ceil(mode_lib->vba.VRatio[k], 1.0) - 2, 0.0)),
					2.0 * mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k] / 2.0, 1.0)
							/ mode_lib->vba.LBBitPerPixel[k]
							/ (mode_lib->vba.VTAPsChroma[k]
									+ dml_max(dml_ceil(mode_lib->vba.VRatio[k] / 2.0, 1.0) - 2, 0.0)));
		}
locals->MaximumSwathWidth[k] = dml_min(
mode_lib->vba.MaximumSwathWidthInDETBuffer,
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDppclk[i],
mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
mode_lib->vba.PixelClock[k]
* (1.0
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
/ 100.0)
* (1.0
+ mode_lib->vba.DISPCLKRampingMargin
/ 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i]
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
|| (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
if (j == 1) {
while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP
&& locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) {
double BWOfNonSplitPlaneOfMaximumBandwidth;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
BWOfNonSplitPlaneOfMaximumBandwidth = 0;
NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) {
BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1;
}
}
if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) {
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
locals->NoOfDPP[i][j][k] = 2;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
if (i != mode_lib->vba.soc.num_states) {
mode_lib->vba.PlaneRequiredDISPCLK =
mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0);
} else {
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k]
* (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.PlaneRequiredDISPCLK);
if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
|| mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
locals->TotalNumberOfActiveDPP[i][j] = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
locals->RequiredDISPCLK[i][j] = dml_max(
locals->RequiredDISPCLK[i][j],
mode_lib->vba.WritebackRequiredDISPCLK);
if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
< mode_lib->vba.WritebackRequiredDISPCLK) {
locals->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
locals->ViewportSizeSupport[i][0] = false;
}
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->TotalAvailablePipesSupport[i][j] = (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP);
}
}
/*Total Available OTG Support Check*/
mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ 1.0;
}
}
mode_lib->vba.NumberOfOTGSupport = (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG);
/*Display IO and DSC Support Check*/
mode_lib->vba.NonsupportedDSCInputBPC = false;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
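/* For each state and for each plane that drives its own timing, derive the
 * achievable output bpp on the link and whether DSC and FEC are required
 * (HDMI vs DP/eDP handled separately).
 */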
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_hdmi) {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
} else if (mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp) {
if (mode_lib->vba.Output[k] == dm_edp) {
mode_lib->vba.EffectiveFECOverhead = 0.0;
} else {
mode_lib->vba.EffectiveFECOverhead =
mode_lib->vba.FECOverhead;
}
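/* Walk the DP link-rate tiers (270/540/810 MHz symbol clock, i.e. HBR/HBR2/
 * HBR3) in order, computing both the uncompressed and the DSC-compressed
 * output bpp; fall through to the next tier while Outbpp stays BPP_INVALID.
 * At the highest tier DSC is also forced on if the uncompressed bpp is still
 * invalid.
 */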
if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 810.0) {
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.DSCInputBitPerComponent[k]);
if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) {
locals->RequiresDSC[i][k] = true;
if (mode_lib->vba.Output[k] == dm_dp) {
locals->RequiresFEC[i][k] = true;
} else {
locals->RequiresFEC[i][k] = false;
}
mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC;
} else {
locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = false;
}
locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp;
}
}
} else {
locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE;
}
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
}
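/* DSCCLK check: a DSC engine processes 3 pixels per clock (6 for native
 * 4:2:0/4:2:2, and the per-engine pixel rate halves again with ODM combine),
 * so flag states where the required DSCCLK exceeds the de-rated MaxDSCCLK.
 */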
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->DSCCLKRequiredMoreThanSupported[i] = false;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if ((mode_lib->vba.Output[k] == dm_dp
|| mode_lib->vba.Output[k] == dm_edp)) {
if (mode_lib->vba.OutputFormat[k] == dm_420
|| mode_lib->vba.OutputFormat[k]
== dm_n422) {
mode_lib->vba.DSCFormatFactor = 2;
} else {
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] = true;
}
} else {
if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] = true;
}
}
}
}
}
}
}
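/* Count the DSC engines needed per state (two per stream with ODM combine,
 * one otherwise) and flag states that exceed the number of DSC units
 * available.
 */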
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->NotEnoughDSCUnits[i] = false;
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 1.0;
}
}
}
if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
locals->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
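/* Pick the DSC slice count from the back-end pixel clock (thresholds at
 * 340/680/1360/3200 MHz selecting 1/2/4/8 slices, then ceil(pclk/400)
 * rounded up to a multiple of 4 beyond that), compute the DSCC+DSC delay
 * (doubled path when ODM combined), and scale it from back-end to front-end
 * pixel clock.
 */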
if (mode_lib->vba.BlendingAndTiming[k] != k) {
mode_lib->vba.slices = 0;
} else if (locals->RequiresDSC[i][k] == false) {
mode_lib->vba.slices = 0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
mode_lib->vba.slices = dml_ceil(
mode_lib->vba.PixelClockBackEnd[k] / 400.0,
4.0);
} else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
mode_lib->vba.slices = 8.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
mode_lib->vba.slices = 4.0;
} else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
mode_lib->vba.slices = 2.0;
} else {
mode_lib->vba.slices = 1.0;
}
if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE
|| locals->OutputBppPerState[i][k] == BPP_INVALID) {
mode_lib->vba.bpp = 0.0;
} else {
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(
mode_lib->vba.HActive[k]
/ mode_lib->vba.slices,
1.0),
mode_lib->vba.slices,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(
mode_lib->vba.OutputFormat[k]);
} else {
locals->DSCDelayPerState[i][k] =
2.0 * (dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
mode_lib->vba.bpp,
dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0),
mode_lib->vba.slices / 2,
mode_lib->vba.OutputFormat[k])
+ dscComputeDelay(mode_lib->vba.OutputFormat[k]));
}
locals->DSCDelayPerState[i][k] =
locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k];
} else {
locals->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true)
locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m];
}
}
}
}
//Prefetch Check
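/* For every state/MPC-combine option: recompute per-DPP swath widths and
 * heights, VM/meta/DPTE row bytes and prefetch source lines, then iterate the
 * prefetch schedule (trading MaxVStartup against prefetch mode) and evaluate
 * bandwidth, immediate-flip support and watermarks.
 */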
for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
locals->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DCCEnable[k] == true)
locals->TotalNumberOfDCCActiveDPP[i][j] = locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k];
}
}
}
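/* Worst-case urgent latency across the three traffic classes, and the legal
 * prefetch-mode range for the requested self-refresh/clock-change policy.
 */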
mode_lib->vba.UrgentLatency = dml_max3(
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.UrgentLatencyPixelMixedWithVMData,
mode_lib->vba.UrgentLatencyVMDataOnly);
mode_lib->vba.PrefetchERROR = CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
locals->SwathWidthYThisState[k] =
dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));
} else {
locals->SwathWidthYThisState[k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
}
mode_lib->vba.SwathWidthGranularityY = 256.0
/ dml_ceil(locals->BytePerPixelInDETY[k], 1.0)
/ locals->MaxSwathHeightY[k];
mode_lib->vba.RoundedUpMaxSwathSizeBytesY =
(dml_ceil(locals->SwathWidthYThisState[k] - 1.0, mode_lib->vba.SwathWidthGranularityY)
+ mode_lib->vba.SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k];
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
256.0) + 256;
}
if (locals->MaxSwathHeightC[k] > 0.0) {
mode_lib->vba.SwathWidthGranularityC = 256.0 / dml_ceil(locals->BytePerPixelInDETC[k], 2.0) / locals->MaxSwathHeightC[k];
mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYThisState[k] / 2.0 - 1.0, mode_lib->vba.SwathWidthGranularityC)
+ mode_lib->vba.SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k];
if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(mode_lib->vba.RoundedUpMaxSwathSizeBytesC, 256.0) + 256;
}
} else {
mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
}
if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
<= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) {
locals->SwathHeightYThisState[k] = locals->MaxSwathHeightY[k];
locals->SwathHeightCThisState[k] = locals->MaxSwathHeightC[k];
} else {
locals->SwathHeightYThisState[k] =
locals->MinSwathHeightY[k];
locals->SwathHeightCThisState[k] =
locals->MinSwathHeightC[k];
}
}
CalculateDCFCLKDeepSleep(
mode_lib,
mode_lib->vba.NumberOfActivePlanes,
locals->BytePerPixelInDETY,
locals->BytePerPixelInDETC,
mode_lib->vba.VRatio,
locals->SwathWidthYThisState,
locals->NoOfDPPThisState,
mode_lib->vba.HRatio,
mode_lib->vba.PixelClock,
locals->PSCL_FACTOR,
locals->PSCL_FACTOR_CHROMA,
locals->RequiredDPPCLKThisState,
&mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
locals->Read256BlockHeightC[k],
locals->Read256BlockWidthC[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelInDETC[k], 2.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k] / 2.0,
mode_lib->vba.ViewportHeight[k] / 2.0,
locals->SwathWidthYThisState[k] / 2.0,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.VMMPageSize,
mode_lib->vba.PTEBufferSizeInRequestsChroma,
mode_lib->vba.PitchC[k],
0.0,
&locals->MacroTileWidthC[k],
&mode_lib->vba.MetaRowBytesC,
&mode_lib->vba.DPTEBytesPerRowC,
&locals->PTEBufferSizeNotExceededC[i][j][k],
locals->dpte_row_width_chroma_ub,
&locals->dpte_row_height_chroma[k],
&locals->meta_req_width_chroma[k],
&locals->meta_req_height_chroma[k],
&locals->meta_row_width_chroma[k],
&locals->meta_row_height_chroma[k],
&locals->vm_group_bytes_chroma,
&locals->dpte_group_bytes_chroma,
locals->PixelPTEReqWidthC,
locals->PixelPTEReqHeightC,
locals->PTERequestSizeC,
locals->dpde0_bytes_per_frame_ub_c,
locals->meta_pte_bytes_per_frame_ub_c);
locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k]/2,
mode_lib->vba.VTAPsChroma[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
locals->SwathHeightCThisState[k],
mode_lib->vba.ViewportYStartC[k],
&locals->PrefillC[k],
&locals->MaxNumSwC[k]);
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma;
} else {
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
mode_lib->vba.DCCEnable[k],
locals->Read256BlockHeightY[k],
locals->Read256BlockWidthY[k],
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.SurfaceTiling[k],
dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
mode_lib->vba.SourceScan[k],
mode_lib->vba.ViewportWidth[k],
mode_lib->vba.ViewportHeight[k],
locals->SwathWidthYThisState[k],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.VMMPageSize,
locals->PTEBufferSizeInRequestsForLuma,
mode_lib->vba.PitchY[k],
mode_lib->vba.DCCMetaPitchY[k],
&locals->MacroTileWidthY[k],
&mode_lib->vba.MetaRowBytesY,
&mode_lib->vba.DPTEBytesPerRowY,
&locals->PTEBufferSizeNotExceededY[i][j][k],
locals->dpte_row_width_luma_ub,
&locals->dpte_row_height[k],
&locals->meta_req_width[k],
&locals->meta_req_height[k],
&locals->meta_row_width[k],
&locals->meta_row_height[k],
&locals->vm_group_bytes[k],
&locals->dpte_group_bytes[k],
locals->PixelPTEReqWidthY,
locals->PixelPTEReqHeightY,
locals->PTERequestSizeY,
locals->dpde0_bytes_per_frame_ub_l,
locals->meta_pte_bytes_per_frame_ub_l);
locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
mode_lib->vba.Interlace[k],
mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
locals->SwathHeightYThisState[k],
mode_lib->vba.ViewportYStartY[k],
&locals->PrefillY[k],
&locals->MaxNumSwY[k]);
locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.HTotal[k] /
mode_lib->vba.PixelClock[k],
mode_lib->vba.MetaRowBytesY,
mode_lib->vba.MetaRowBytesC,
locals->meta_row_height[k],
locals->meta_row_height_chroma[k],
mode_lib->vba.DPTEBytesPerRowY,
mode_lib->vba.DPTEBytesPerRowC,
locals->dpte_row_height[k],
locals->dpte_row_height_chroma[k],
&locals->meta_row_bw[k],
&locals->dpte_row_bw[k]);
}
mode_lib->vba.ExtraLatency = CalculateExtraLatency(
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i],
locals->TotalNumberOfActiveDPP[i][j],
mode_lib->vba.PixelChunkSizeInKByte,
locals->TotalNumberOfDCCActiveDPP[i][j],
mode_lib->vba.MetaChunkSize,
locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActivePlanes,
locals->NoOfDPPThisState,
locals->dpte_group_bytes,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels);
mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency
+ CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[k],
mode_lib->vba.WritebackHRatio[k],
mode_lib->vba.WritebackVRatio[k],
mode_lib->vba.WritebackLumaHTaps[k],
mode_lib->vba.WritebackLumaVTaps[k],
mode_lib->vba.WritebackChromaHTaps[k],
mode_lib->vba.WritebackChromaVTaps[k],
mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j];
} else {
locals->WritebackDelay[i][k] = 0.0;
}
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[m] == k
&& mode_lib->vba.WritebackEnable[m]
== true) {
locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k],
mode_lib->vba.WritebackLatency + CalculateWriteBackDelay(
mode_lib->vba.WritebackPixelFormat[m],
mode_lib->vba.WritebackHRatio[m],
mode_lib->vba.WritebackVRatio[m],
mode_lib->vba.WritebackLumaHTaps[m],
mode_lib->vba.WritebackLumaVTaps[m],
mode_lib->vba.WritebackChromaHTaps[m],
mode_lib->vba.WritebackChromaVTaps[m],
mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) {
if (mode_lib->vba.BlendingAndTiming[k] == m) {
locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m];
}
}
}
mode_lib->vba.MaxMaxVStartup[0][0] = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);
}
mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
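/* Retry loop: start at the minimum prefetch mode with the largest VStartup.
 * While the VM/row transfer needs too many lines, shrink MaxVStartup (down to
 * 13); otherwise reset VStartup and advance to the next prefetch mode, until
 * the prefetch schedule and VRatio limits pass or the options are exhausted.
 */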
do {
mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
mode_lib->vba.TWait = CalculateTWait(
mode_lib->vba.PrefetchMode[i][j],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
CalculatePrefetchSchedulePerPlane(mode_lib, i, j, k);
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
unsigned int m;
locals->cursor_bw[k] = 0;
locals->cursor_bw_pre[k] = 0;
for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
locals->cursor_bw[k] = mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m]
/ 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
locals->cursor_bw_pre[k] = mode_lib->vba.CursorWidth[k][m] * mode_lib->vba.CursorBPP[k][m]
/ 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * locals->VRatioPreY[i][j][k];
}
CalculateUrgentBurstFactor(
mode_lib->vba.DETBufferSizeInKByte[0],
locals->SwathHeightYThisState[k],
locals->SwathHeightCThisState[k],
locals->SwathWidthYThisState[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.UrgentLatency,
mode_lib->vba.CursorBufferSize,
mode_lib->vba.CursorWidth[k][0] + mode_lib->vba.CursorWidth[k][1],
dml_max(mode_lib->vba.CursorBPP[k][0], mode_lib->vba.CursorBPP[k][1]),
mode_lib->vba.VRatio[k],
locals->VRatioPreY[i][j][k],
locals->VRatioPreC[i][j][k],
locals->BytePerPixelInDETY[k],
locals->BytePerPixelInDETC[k],
&locals->UrgentBurstFactorCursor[k],
&locals->UrgentBurstFactorCursorPre[k],
&locals->UrgentBurstFactorLuma[k],
&locals->UrgentBurstFactorLumaPre[k],
&locals->UrgentBurstFactorChroma[k],
&locals->UrgentBurstFactorChromaPre[k],
&locals->NotEnoughUrgentLatencyHiding[0][0],
&locals->NotEnoughUrgentLatencyHidingPre);
if (mode_lib->vba.UseUrgentBurstBandwidth == false) {
locals->UrgentBurstFactorCursor[k] = 1;
locals->UrgentBurstFactorCursorPre[k] = 1;
locals->UrgentBurstFactorLuma[k] = 1;
locals->UrgentBurstFactorLumaPre[k] = 1;
locals->UrgentBurstFactorChroma[k] = 1;
locals->UrgentBurstFactorChromaPre[k] = 1;
}
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithoutPrefetch
+ locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k] + locals->ReadBandwidthLuma[k]
* locals->UrgentBurstFactorLuma[k] + locals->ReadBandwidthChroma[k]
* locals->UrgentBurstFactorChroma[k] + locals->meta_row_bw[k] + locals->dpte_row_bw[k];
mode_lib->vba.MaximumReadBandwidthWithPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ dml_max3(locals->prefetch_vmrow_bw[k],
locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k] + locals->ReadBandwidthChroma[k]
* locals->UrgentBurstFactorChroma[k] + locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k]
+ locals->meta_row_bw[k] + locals->dpte_row_bw[k],
locals->RequiredPrefetchPixelDataBWLuma[i][j][k] * locals->UrgentBurstFactorLumaPre[k]
+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
}
locals->BandwidthWithoutPrefetchSupported[i][0] = true;
if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding[0][0] == 1) {
locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding[0][0] == 1
|| locals->NotEnoughUrgentLatencyHidingPre == 1) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->LineTimesForPrefetch[k] < 2.0
|| locals->LinesForMetaPTE[k] >= 32.0
|| locals->LinesForMetaAndDPTERow[k] >= 16.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->PrefetchSupported[i][j] = false;
}
}
locals->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->VRatioPreY[i][j][k] > 4.0
|| locals->VRatioPreC[i][j][k] > 4.0
|| mode_lib->vba.IsErrorResult[i][j][k] == true) {
locals->VRatioInPrefetchSupported[i][j] = false;
}
}
mode_lib->vba.AnyLinesForVMOrRowTooLarge = false;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (locals->LinesForMetaAndDPTERow[k] >= 16 || locals->LinesForMetaPTE[k] >= 32) {
mode_lib->vba.AnyLinesForVMOrRowTooLarge = true;
}
}
if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
} else {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
|| mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
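/* If the prefetch schedule fits, budget the bandwidth left over for immediate
 * flips and evaluate a flip schedule per plane; otherwise immediate flip is
 * unsupported for this state.
 */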
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip
- dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k]
+ locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
locals->RequiredPrefetchPixelDataBWLuma[i][j][k] * locals->UrgentBurstFactorLumaPre[k]
+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
CalculateFlipSchedule(
mode_lib,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.HostVMEnable,
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
locals->MetaRowBytes[0][0][k],
locals->DPTEBytesPerRow[0][0][k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
locals->Tno_bw[k],
mode_lib->vba.DCCEnable[k],
locals->dpte_row_height[k],
locals->meta_row_height[k],
locals->dpte_row_height_chroma[k],
locals->meta_row_height_chroma[k],
&locals->DestinationLinesToRequestVMInImmediateFlip[k],
&locals->DestinationLinesToRequestRowInImmediateFlip[k],
&locals->final_flip_bw[k],
&locals->ImmediateFlipSupportedForPipe[k]);
}
mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.total_dcn_read_bw_with_flip = mode_lib->vba.total_dcn_read_bw_with_flip + dml_max3(
locals->prefetch_vmrow_bw[k],
locals->final_flip_bw[k] + locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
+ locals->ReadBandwidthChroma[k] * locals->UrgentBurstFactorChroma[k]
+ locals->cursor_bw[k] * locals->UrgentBurstFactorCursor[k],
locals->final_flip_bw[k] + locals->RequiredPrefetchPixelDataBWLuma[i][j][k]
* locals->UrgentBurstFactorLumaPre[k] + locals->RequiredPrefetchPixelDataBWChroma[i][j][k]
* locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k]
* locals->UrgentBurstFactorCursorPre[k]);
}
locals->ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
> locals->ReturnBWPerState[i][0]) {
locals->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->ImmediateFlipSupportedForPipe[k] == false) {
locals->ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
locals->ImmediateFlipSupportedForState[i][j] = false;
}
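/* Finally, derive watermarks and DRAM clock-change support for this
 * state/combine option from the schedule computed above.
 */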
mode_lib->vba.UrgentOutOfOrderReturnPerChannel = dml_max3(
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly);
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
mode_lib->vba.PrefetchMode[i][j],
mode_lib->vba.NumberOfActivePlanes,
mode_lib->vba.MaxLineBufferLines,
mode_lib->vba.LineBufferSize,
mode_lib->vba.DPPOutputBufferPixels,
mode_lib->vba.DETBufferSizeInKByte[0],
mode_lib->vba.WritebackInterfaceLumaBufferSize,
mode_lib->vba.WritebackInterfaceChromaBufferSize,
mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
locals->dpte_group_bytes,
mode_lib->vba.MetaChunkSize,
mode_lib->vba.UrgentLatency,
mode_lib->vba.ExtraLatency,
mode_lib->vba.WritebackLatency,
mode_lib->vba.WritebackChunkSize,
mode_lib->vba.SOCCLKPerState[i],
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.SRExitTime,
mode_lib->vba.SREnterPlusExitTime,
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
locals->NoOfDPPThisState,
mode_lib->vba.DCCEnable,
locals->RequiredDPPCLKThisState,
locals->SwathWidthYSingleDPP,
locals->SwathHeightYThisState,
locals->ReadBandwidthLuma,
locals->SwathHeightCThisState,
locals->ReadBandwidthChroma,
mode_lib->vba.LBBitPerPixel,
locals->SwathWidthYThisState,
mode_lib->vba.HRatio,
mode_lib->vba.vtaps,
mode_lib->vba.VTAPsChroma,
mode_lib->vba.VRatio,
mode_lib->vba.HTotal,
mode_lib->vba.PixelClock,
mode_lib->vba.BlendingAndTiming,
locals->BytePerPixelInDETY,
locals->BytePerPixelInDETC,
mode_lib->vba.WritebackEnable,
mode_lib->vba.WritebackPixelFormat,
mode_lib->vba.WritebackDestinationWidth,
mode_lib->vba.WritebackDestinationHeight,
mode_lib->vba.WritebackSourceHeight,
&locals->DRAMClockChangeSupport[i][j],
&mode_lib->vba.UrgentWatermark,
&mode_lib->vba.WritebackUrgentWatermark,
&mode_lib->vba.DRAMClockChangeWatermark,
&mode_lib->vba.WritebackDRAMClockChangeWatermark,
&mode_lib->vba.StutterExitWatermark,
&mode_lib->vba.StutterEnterPlusExitWatermark,
&mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
}
}
/*Vertical Active BW support*/
{
double MaxTotalVActiveRDBandwidth = 0.0;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
}
for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(
locals->IdealSDPPortBandwidthPerState[i][0] *
mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation
/ 100.0, mode_lib->vba.DRAMSpeedPerState[i] *
mode_lib->vba.NumberOfChannels *
mode_lib->vba.DRAMChannelWidth *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
/ 100.0);
if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) {
locals->TotalVerticalActiveBandwidthSupport[i][0] = true;
} else {
locals->TotalVerticalActiveBandwidthSupport[i][0] = false;
}
}
}
/*PTE Buffer Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
locals->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->PTEBufferSizeNotExceededY[i][j][k] == false
|| locals->PTEBufferSizeNotExceededC[i][j][k] == false) {
locals->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
mode_lib->vba.CursorSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
for (m = 0; m < mode_lib->vba.NumberOfCursors[k]; m++) {
if (mode_lib->vba.CursorBPP[k][m] == 64 && mode_lib->vba.Cursor64BppSupport == false) {
mode_lib->vba.CursorSupport = false;
}
}
}
}
/*Valid Pitch Check*/
mode_lib->vba.PitchSupport = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->AlignedYPitch[k] = dml_ceil(
dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
locals->MacroTileWidthY[k]);
if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.DCCEnable[k] == true) {
locals->AlignedDCCMetaPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.DCCMetaPitchY[k],
mode_lib->vba.ViewportWidth[k]),
64.0 * locals->Read256BlockWidthY[k]);
} else {
locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
}
if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
mode_lib->vba.PitchSupport = false;
}
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
locals->AlignedCPitch[k] = dml_ceil(
dml_max(
mode_lib->vba.PitchC[k],
mode_lib->vba.ViewportWidth[k] / 2.0),
locals->MacroTileWidthC[k]);
} else {
locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k];
}
if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
mode_lib->vba.PitchSupport = false;
}
}
/*Mode Support, Voltage State and SOC Configuration*/
for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
if (!mode_lib->vba.ScaleRatioAndTapsSupport) {
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (!mode_lib->vba.SourceFormatPixelAndScanSupport) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
} else if (!locals->ViewportSizeSupport[i][0]) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (!locals->DIOSupport[i]) {
status = DML_FAIL_DIO_SUPPORT;
} else if (locals->NotEnoughDSCUnits[i]) {
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (locals->DSCCLKRequiredMoreThanSupported[i]) {
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (!locals->ROBSupport[i][0]) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (!locals->DISPCLK_DPPCLK_Support[i][j]) {
status = DML_FAIL_DISPCLK_DPPCLK;
} else if (!locals->TotalAvailablePipesSupport[i][j]) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
} else if (!mode_lib->vba.NumberOfOTGSupport) {
status = DML_FAIL_NUM_OTG;
} else if (!mode_lib->vba.WritebackModeSupport) {
status = DML_FAIL_WRITEBACK_MODE;
} else if (!mode_lib->vba.WritebackLatencySupport) {
status = DML_FAIL_WRITEBACK_LATENCY;
} else if (!mode_lib->vba.WritebackScaleRatioAndTapsSupport) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
} else if (!mode_lib->vba.CursorSupport) {
status = DML_FAIL_CURSOR_SUPPORT;
} else if (!mode_lib->vba.PitchSupport) {
status = DML_FAIL_PITCH_SUPPORT;
} else if (!locals->TotalVerticalActiveBandwidthSupport[i][0]) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (!locals->PTEBufferSizeNotExceeded[i][j]) {
status = DML_FAIL_PTE_BUFFER_SIZE;
} else if (mode_lib->vba.NonsupportedDSCInputBPC) {
status = DML_FAIL_DSC_INPUT_BPC;
} else if ((mode_lib->vba.HostVMEnable
&& !locals->ImmediateFlipSupportedForState[i][j])) {
status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
} else if (!locals->PrefetchSupported[i][j]) {
status = DML_FAIL_PREFETCH_SUPPORT;
} else if (!locals->VRatioInPrefetchSupported[i][j]) {
status = DML_FAIL_V_RATIO_PREFETCH;
}
locals->ModeSupport[i][j] = (status == DML_VALIDATION_OK);
locals->ValidationStatus[i] = status;
}
}
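/* Pick the lowest supported voltage state at or above the override level and
 * decide whether to use MPC combine for it: combine when it is the only way
 * the state passes, when always requested, or when the reduce-voltage-and-
 * clocks policy gains vactive/vblank DRAM clock-change support.
 */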
{
unsigned int MaximumMPCCombine = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1;
for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) {
if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false
|| mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible
|| (mode_lib->vba.WhenToDoMPCCombine == dm_mpc_reduce_voltage_and_clocks
&& ((locals->DRAMClockChangeSupport[i][1] == dm_dram_clock_change_vactive
&& locals->DRAMClockChangeSupport[i][0] != dm_dram_clock_change_vactive)
|| (locals->DRAMClockChangeSupport[i][1] == dm_dram_clock_change_vblank
&& locals->DRAMClockChangeSupport[i][0] == dm_dram_clock_change_unsupported))))) {
MaximumMPCCombine = 1;
} else {
MaximumMPCCombine = 0;
}
break;
}
}
mode_lib->vba.ImmediateFlipSupport =
locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k];
}
mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine];
mode_lib->vba.maxMpcComb = MaximumMPCCombine;
}
mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
mode_lib->vba.OutputBpp[k] =
locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k];
}
}
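/* Compute urgent, writeback and stutter watermarks for the chosen
 * configuration and classify DRAM clock-change support (vactive, vblank or
 * unsupported) from the per-plane latency-hiding margins.
 */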
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceLumaBufferSize,
unsigned int WritebackInterfaceChromaBufferSize,
double DCFCLK,
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
{
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double DPPOutputBufferLinesY;
double DPPOutputBufferLinesC;
unsigned int DETBufferSizeY;
unsigned int DETBufferSizeC;
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETC;
unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
unsigned int LinesInDETCRoundedDownToSwath;
double FullDETBufferingTimeY[DC__NUM_DPP__MAX];
double FullDETBufferingTimeC;
double ActiveDRAMClockChangeLatencyMarginY;
double ActiveDRAMClockChangeLatencyMarginC;
double WritebackDRAMClockChangeLatencyMargin;
double PlaneWithMinActiveDRAMClockChangeMargin;
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
double FullDETBufferingTimeYStutterCriticalPlane = 0;
double TimeToFinishSwathTransferStutterCriticalPlane = 0;
unsigned int k, j;
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
if (DCCEnable[k] == true) {
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
}
}
mode_lib->vba.TotalDataReadBandwidth = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalDataReadBandwidth = mode_lib->vba.TotalDataReadBandwidth
+ ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
}
*UrgentWatermark = UrgentLatency + ExtraLatency;
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
}
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
} else {
*WritebackUrgentWatermark = WritebackLatency
+ WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
} else {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency
+ WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1))
- (vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / 2 / dml_max(HRatio[k] / 2, 1.0)), 1))
- (VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k]
* (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
/ (VRatio[k] / 2) * (HTotal[k] / PixelClock[k]);
if (SwathWidthY[k] > 2 * DPPOutputBufferPixels) {
DPPOutputBufferLinesY = (double) DPPOutputBufferPixels / SwathWidthY[k];
} else if (SwathWidthY[k] > DPPOutputBufferPixels) {
DPPOutputBufferLinesY = 0.5;
} else {
DPPOutputBufferLinesY = 1;
}
if (SwathWidthY[k] / 2.0 > 2 * DPPOutputBufferPixels) {
DPPOutputBufferLinesC = (double) DPPOutputBufferPixels
/ (SwathWidthY[k] / 2.0);
} else if (SwathWidthY[k] / 2.0 > DPPOutputBufferPixels) {
DPPOutputBufferLinesC = 0.5;
} else {
DPPOutputBufferLinesC = 1;
}
CalculateDETBufferSize(
DETBufferSizeInKByte,
SwathHeightY[k],
SwathHeightC[k],
&DETBufferSizeY,
&DETBufferSizeC);
LinesInDETY[k] = (double)DETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k]
* (HTotal[k] / PixelClock[k]) / VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = (double)DETBufferSizeC / BytePerPixelDETC[k] / (SwathWidthY[k] / 2.0);
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath
* (HTotal[k] / PixelClock[k]) / (VRatio[k] / 2);
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = HTotal[k] / PixelClock[k]
* DPPOutputBufferLinesY + EffectiveLBLatencyHidingY
+ FullDETBufferingTimeY[k] - *DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = HTotal[k] / PixelClock[k]
* DPPOutputBufferLinesC + EffectiveLBLatencyHidingC
+ FullDETBufferingTimeC - *DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / (VRatio[k] / 2);
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
ActiveDRAMClockChangeLatencyMarginY,
ActiveDRAMClockChangeLatencyMarginC);
} else {
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (WritebackEnable[k] == true) {
if (WritebackPixelFormat[k] == dm_444_32) {
WritebackDRAMClockChangeLatencyMargin = (WritebackInterfaceLumaBufferSize
+ WritebackInterfaceChromaBufferSize) / (WritebackDestinationWidth[k]
* WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k]
/ PixelClock[k]) * 4) - *WritebackDRAMClockChangeWatermark;
} else {
WritebackDRAMClockChangeLatencyMargin = dml_min(
WritebackInterfaceLumaBufferSize * 8.0 / 10,
2 * WritebackInterfaceChromaBufferSize * 8.0 / 10) / (WritebackDestinationWidth[k]
* WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]))
- *WritebackDRAMClockChangeWatermark;
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
WritebackDRAMClockChangeLatencyMargin);
}
}
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
*MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k))
&& !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
mode_lib->vba.TotalNumberOfActiveOTG = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
}
}
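/* Classify DRAM clock-change support: with prefetch mode 0, vactive if every
 * plane keeps positive latency margin, else vblank if the switch can hide in
 * vblank (synchronized vblank, a single OTG, or a positive second-smallest
 * margin); anything else is unsupported.
 */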
if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true
|| mode_lib->vba.TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0)
&& PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k]
- (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k]))
* (HTotal[k] / PixelClock[k]) / VRatio[k];
}
}
*StutterExitWatermark = SRExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark
+ ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = dml_max(
SREnterPlusExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark
+ ExtraLatency + 10 / DCFCLKDeepSleep,
TimeToFinishSwathTransferStutterCriticalPlane);
}
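/* Minimum DCFCLK that still keeps every active pipe fed while in deep sleep:
 * per plane, the larger of the luma/chroma delivery-rate requirement and
 * PixelClock/16, with an overall floor of 8 MHz.
 */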
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double *DCFCLKDeepSleep)
{
unsigned int k;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
//double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k]
/ HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k]
/ DPPCLK[k];
}
if (BytePerPixelDETC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma = 0;
} else {
if (VRatio[k] / 2 <= 1) {
DisplayPipeLineDeliveryTimeChroma = SwathWidthY[k] / 2.0
* DPPPerPlane[k] / (HRatio[k] / 2) / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma = SwathWidthY[k] / 2.0
/ PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (BytePerPixelDETC[k] > 0) {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
1.1 * SwathWidthY[k] * dml_ceil(BytePerPixelDETY[k], 1)
/ 32.0 / DisplayPipeLineDeliveryTimeLuma,
1.1 * SwathWidthY[k] / 2.0
* dml_ceil(BytePerPixelDETC[k], 2) / 32.0
/ DisplayPipeLineDeliveryTimeChroma);
} else {
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * SwathWidthY[k]
* dml_ceil(BytePerPixelDETY[k], 1) / 64.0
/ DisplayPipeLineDeliveryTimeLuma;
}
mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(
mode_lib->vba.DCFCLKDeepSleepPerPlane[k],
PixelClock[k] / 16);
}
*DCFCLKDeepSleep = 8;
for (k = 0; k < NumberOfActivePlanes; ++k) {
*DCFCLKDeepSleep = dml_max(
*DCFCLKDeepSleep,
mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
}
}
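/* Split the DET buffer between luma and chroma: all of it to luma when there
 * is no chroma swath, an even split when the luma swath is not taller, and a
 * 2/3 : 1/3 split otherwise.
 */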
static void CalculateDETBufferSize(
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
unsigned int *DETBufferSizeY,
unsigned int *DETBufferSizeC)
{
if (SwathHeightC == 0) {
*DETBufferSizeY = DETBufferSizeInKByte * 1024;
*DETBufferSizeC = 0;
} else if (SwathHeightY <= SwathHeightC) {
*DETBufferSizeY = DETBufferSizeInKByte * 1024 / 2;
*DETBufferSizeC = DETBufferSizeInKByte * 1024 / 2;
} else {
*DETBufferSizeY = DETBufferSizeInKByte * 1024 * 2 / 3;
*DETBufferSizeC = DETBufferSizeInKByte * 1024 / 3;
}
}
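/* Urgent burst factors: ratio of the time a buffer (cursor, luma DET, chroma
 * DET) can cover to that time minus the urgent latency, for both the active
 * and the prefetch (pre) vratios; the NotEnough flags are raised when a
 * buffer cannot hide the urgent latency at all.
 */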
static void CalculateUrgentBurstFactor(
unsigned int DETBufferSizeInKByte,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
unsigned int SwathWidthY,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioPreY,
double VRatioPreC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorCursorPre,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorLumaPre,
double *UrgentBurstFactorChroma,
double *UrgentBurstFactorChromaPre,
unsigned int *NotEnoughUrgentLatencyHiding,
unsigned int *NotEnoughUrgentLatencyHidingPre)
{
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
double CursorBufferSizeInTime;
double CursorBufferSizeInTimePre;
double DETBufferSizeInTimeLuma;
double DETBufferSizeInTimeLumaPre;
double DETBufferSizeInTimeChroma;
double DETBufferSizeInTimeChromaPre;
unsigned int DETBufferSizeY;
unsigned int DETBufferSizeC;
*NotEnoughUrgentLatencyHiding = 0;
*NotEnoughUrgentLatencyHidingPre = 0;
if (CursorWidth > 0) {
LinesInCursorBuffer = 1 << (unsigned int) dml_floor(
dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorCursor = 0;
} else {
*UrgentBurstFactorCursor = CursorBufferSizeInTime
/ (CursorBufferSizeInTime - UrgentLatency);
}
if (VRatioPreY > 0) {
CursorBufferSizeInTimePre = LinesInCursorBuffer * LineTime / VRatioPreY;
if (CursorBufferSizeInTimePre - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHidingPre = 1;
*UrgentBurstFactorCursorPre = 0;
} else {
*UrgentBurstFactorCursorPre = CursorBufferSizeInTimePre
/ (CursorBufferSizeInTimePre - UrgentLatency);
}
} else {
*UrgentBurstFactorCursorPre = 1;
}
}
CalculateDETBufferSize(
DETBufferSizeInKByte,
SwathHeightY,
SwathHeightC,
&DETBufferSizeY,
&DETBufferSizeC);
LinesInDETLuma = (double)DETBufferSizeY / BytePerPixelInDETY / SwathWidthY;
DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorLuma = 0;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma
/ (DETBufferSizeInTimeLuma - UrgentLatency);
}
if (VRatioPreY > 0) {
DETBufferSizeInTimeLumaPre = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime
/ VRatioPreY;
if (DETBufferSizeInTimeLumaPre - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHidingPre = 1;
*UrgentBurstFactorLumaPre = 0;
} else {
*UrgentBurstFactorLumaPre = DETBufferSizeInTimeLumaPre
/ (DETBufferSizeInTimeLumaPre - UrgentLatency);
}
} else {
*UrgentBurstFactorLumaPre = 1;
}
if (BytePerPixelInDETC > 0) {
LinesInDETChroma = (double)DETBufferSizeC / BytePerPixelInDETC / (SwathWidthY / 2);
DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime
/ (VRatio / 2);
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorChroma = 0;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma
/ (DETBufferSizeInTimeChroma - UrgentLatency);
}
if (VRatioPreC > 0) {
DETBufferSizeInTimeChromaPre = dml_floor(LinesInDETChroma, SwathHeightC)
* LineTime / VRatioPreC;
if (DETBufferSizeInTimeChromaPre - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHidingPre = 1;
*UrgentBurstFactorChromaPre = 0;
} else {
*UrgentBurstFactorChromaPre = DETBufferSizeInTimeChromaPre
/ (DETBufferSizeInTimeChromaPre - UrgentLatency);
}
} else {
*UrgentBurstFactorChromaPre = 1;
}
}
}
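/*
 * CalculatePixelDeliveryTimes: compute, per plane, how long one swath line
 * takes to deliver for the nominal and prefetch cases (display-rate limited
 * when the vratio is <= 1, scaler-throughput limited otherwise), then divide
 * by the number of 256B requests per swath to get per-request delivery times.
 */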
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double BytePerPixelDETC[],
enum scan_direction_class SourceScan[],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
{
double req_per_swath_ub;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k]
/ HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k]
/ PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelDETC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma[k] = 0;
} else {
if (VRatio[k] / 2 <= 1) {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k]
* DPPPerPlane[k] / (HRatio[k] / 2) / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k]
/ PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (VRatioPrefetchY[k] <= 1) {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k]
* DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k]
/ PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelDETC[k] == 0) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (VRatioPrefetchC[k] <= 1) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
swath_width_chroma_ub[k] * DPPPerPlane[k]
/ (HRatio[k] / 2) / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] == dm_horz) {
req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
} else {
req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
}
DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k]
/ req_per_swath_ub;
DisplayPipeRequestDeliveryTimeLumaPrefetch[k] =
DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
if (BytePerPixelDETC[k] == 0) {
DisplayPipeRequestDeliveryTimeChroma[k] = 0;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (SourceScan[k] == dm_horz) {
req_per_swath_ub = swath_width_chroma_ub[k]
/ BlockWidth256BytesC[k];
} else {
req_per_swath_ub = swath_width_chroma_ub[k]
/ BlockHeight256BytesC[k];
}
DisplayPipeRequestDeliveryTimeChroma[k] =
DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] =
DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
}
}
}
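/*
 * CalculateMetaAndPTETimes: derive the time available per DCC meta chunk,
 * per PTE group and per VM group/request for the nominal, vblank and
 * immediate-flip cases. The line budgets are converted to time via
 * HTotal/PixelClock and divided by the number of chunks, groups or requests
 * per row or frame.
 */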
static void CalculateMetaAndPTETimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int MetaChunkSize,
unsigned int MinMetaChunkSizeBytes,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
double VRatio[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
enum scan_direction_class SourceScan[],
unsigned int dpte_row_height[],
unsigned int dpte_row_height_chroma[],
unsigned int meta_row_width[],
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
unsigned int PixelPTEReqHeightY[],
unsigned int PixelPTEReqWidthC[],
unsigned int PixelPTEReqHeightC[],
unsigned int dpte_row_width_luma_ub[],
unsigned int dpte_row_width_chroma_ub[],
unsigned int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
unsigned int meta_pte_bytes_per_frame_ub_l[],
unsigned int meta_pte_bytes_per_frame_ub_c[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double TimePerMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
unsigned int meta_chunk_width;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_chunks_per_row_ub;
unsigned int dpte_group_width_luma;
unsigned int dpte_group_width_chroma;
unsigned int dpte_groups_per_row_luma_ub;
unsigned int dpte_groups_per_row_chroma_ub;
unsigned int num_group_per_lower_vm_stage;
unsigned int num_req_per_lower_vm_stage;
unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
if (BytePerPixelDETC[k] == 0) {
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / (VRatio[k] / 2);
}
} else {
DST_Y_PER_PTE_ROW_NOM_L[k] = 0;
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
}
if (DCCEnable[k] == true) {
DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
} else {
DST_Y_PER_META_ROW_NOM_L[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
meta_chunk_width = MetaChunkSize * 1024 * 256
/ dml_ceil(BytePerPixelDETY[k], 1) / meta_row_height[k];
min_meta_chunk_width = MinMetaChunkSizeBytes * 256
/ dml_ceil(BytePerPixelDETY[k], 1) / meta_row_height[k];
meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
if (SourceScan[k] == dm_horz) {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
} else {
meta_chunk_threshold = 2 * min_meta_chunk_width
- meta_req_height[k];
}
if (meta_row_remainder <= meta_chunk_threshold) {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
} else {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
}
TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k]
/ PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k]
* HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k]
* HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
} else {
TimePerMetaChunkNominal[k] = 0;
TimePerMetaChunkVBlank[k] = 0;
TimePerMetaChunkFlip[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
if (SourceScan[k] == dm_horz) {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k]
* PixelPTEReqWidthY[k];
} else {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k]
* PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(
(float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
/ PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k]
* HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_flip_luma[k] =
DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k]
/ PixelClock[k]
/ dpte_groups_per_row_luma_ub;
if (BytePerPixelDETC[k] == 0) {
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
} else {
if (SourceScan[k] == dm_horz) {
dpte_group_width_chroma = dpte_group_bytes[k]
/ PTERequestSizeC[k] * PixelPTEReqWidthC[k];
} else {
dpte_group_width_chroma = dpte_group_bytes[k]
/ PTERequestSizeC[k]
* PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(
(float) dpte_row_width_chroma_ub[k]
/ dpte_group_width_chroma,
1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
* HTotal[k] / PixelClock[k]
/ dpte_groups_per_row_chroma_ub;
time_per_pte_group_vblank_chroma[k] =
DestinationLinesToRequestRowInVBlank[k] * HTotal[k]
/ PixelClock[k]
/ dpte_groups_per_row_chroma_ub;
time_per_pte_group_flip_chroma[k] =
DestinationLinesToRequestRowInImmediateFlip[k]
* HTotal[k] / PixelClock[k]
/ dpte_groups_per_row_chroma_ub;
}
} else {
time_per_pte_group_nom_luma[k] = 0;
time_per_pte_group_vblank_luma[k] = 0;
time_per_pte_group_flip_luma[k] = 0;
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
if (DCCEnable[k] == false) {
if (BytePerPixelDETC[k] > 0) {
num_group_per_lower_vm_stage =
dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage =
dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelDETC[k] > 0) {
num_group_per_lower_vm_stage =
dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage =
dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (BytePerPixelDETC[k] > 0) {
num_group_per_lower_vm_stage =
dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage =
dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
}
}
if (DCCEnable[k] == false) {
if (BytePerPixelDETC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k]
/ 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k]
/ 64;
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelDETC[k] > 0) {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64
+ meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (BytePerPixelDETC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ dpde0_bytes_per_frame_ub_c[k] / 64
+ meta_pte_bytes_per_frame_ub_l[k] / 64
+ meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ meta_pte_bytes_per_frame_ub_l[k] / 64;
}
}
}
TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k]
/ PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k]
* HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k]
* HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k]
* HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
if (GPUVMMaxPageTableLevels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
}
} else {
TimePerVMGroupVBlank[k] = 0;
TimePerVMGroupFlip[k] = 0;
TimePerVMRequestVBlank[k] = 0;
TimePerVMRequestFlip[k] = 0;
}
}
}
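/*
 * CalculateExtraLatency: the urgent round-trip/out-of-order latency plus the
 * time to return one pixel chunk per active DPP (and one meta chunk per DCC
 * active DPP) at the return bandwidth. With GPUVM enabled, each plane also
 * adds NumberOfDPP * dpte_group_bytes, scaled by (1 + 8 * dynamic host VM
 * levels) and the host VM inefficiency factor, over the return bandwidth.
 */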
static double CalculateExtraLatency(
double UrgentRoundTripAndOutOfOrderLatency,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
int HostVMCachedPageTableLevels)
{
double CalculateExtraLatency;
double HostVMInefficiencyFactor;
int HostVMDynamicLevels;
if (GPUVMEnable && HostVMEnable) {
HostVMInefficiencyFactor =
PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
/ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevels = HostVMMaxPageTableLevels - HostVMCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevels = 0;
}
CalculateExtraLatency = UrgentRoundTripAndOutOfOrderLatency
+ (TotalNumberOfActiveDPP * PixelChunkSizeInKByte
+ TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0
/ ReturnBW;
if (GPUVMEnable) {
int k;
for (k = 0; k < NumberOfActivePlanes; k++) {
CalculateExtraLatency = CalculateExtraLatency
+ NumberOfDPP[k] * dpte_group_bytes[k]
* (1 + 8 * HostVMDynamicLevels)
* HostVMInefficiencyFactor / ReturnBW;
}
}
return CalculateExtraLatency;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "clk_mgr.h"
#include "resource.h"
#include "dcn31/dcn31_hubbub.h"
#include "dcn314_fpu.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/display_mode_vba.h"
#include "dml/dml_inline_defs.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
.hostvm_max_page_table_levels = 2,
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
.config_return_buffer_size_in_kbytes = 1792,
.compressed_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 32,
.zero_size_buffer_entries = 512,
.compbuf_reserved_space_64b = 256,
.compbuf_reserved_space_zs = 64,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 8,
.ptoi_supported = false,
.num_dsc = 4,
.maximum_dsc_bits_per_component = 10,
.dsc422_native_support = false,
.is_line_buffer_bpp_fixed = true,
.line_buffer_fixed_bpp = 48,
.line_buffer_size_bits = 789504,
.max_line_buffer_lines = 12,
.writeback_interface_buffer_size_kbytes = 90,
.max_num_dpp = 4,
.max_num_otg = 4,
.max_num_hdmi_frl_outputs = 1,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.dispclk_ramp_margin_percent = 1,
.max_inter_dcn_tile_repeaters = 8,
.cursor_buffer_size = 16,
.cursor_chunk_size = 2,
.writeback_line_buffer_buffer_size = 0,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.dppclk_delay_subtotal = 46,
.dppclk_delay_scl = 50,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dynamic_metadata_vm_enabled = false,
.odm_combine_4to1_supported = false,
.dcc_supported = true,
};
static struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 600.0,
},
{
.state = 2,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 600.0,
},
{
.state = 3,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
},
.num_states = 5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.sr_exit_z8_time_us = 268.0,
.sr_enter_plus_exit_z8_time_us = 393.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 60.0,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.return_bus_width_bytes = 64,
.downspread_percent = 0.38,
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
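/*
 * dcn314_update_bw_bounding_box_fpu: rebuild the DCN3.14 SoC clock table
 * from the SMU-provided clk_table. Voltage-dependent clocks (DCFCLK, FCLK,
 * SOCCLK, DRAM speed) come from the clk_table entries; voltage-independent
 * clocks are taken from the closest state of the static bounding box, and
 * DISPCLK/DPPCLK are pinned to the maximum values found in the prepass.
 */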
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st *clock_limits =
dcn3_14_soc.clock_limits;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
int j;
dc_assert_fp_enabled();
// Default clock levels are used for diags, which may lead to overclocking.
if (dc->config.use_default_clock_table == false) {
dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
if (bw_params->dram_channel_width_bytes > 0)
dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
if (bw_params->num_channels > 0)
dcn3_14_soc.num_chans = bw_params->num_channels;
ASSERT(dcn3_14_soc.num_chans);
ASSERT(clk_table->num_entries);
/* Prepass to find max clocks independent of voltage level. */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards */
for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
if (clk_table->num_entries == 1) {
/*smu gives one DPM level, let's take the highest one*/
closest_clk_lvl = dcn3_14_soc.num_states - 1;
}
clock_limits[i].state = i;
/* Clocks dependent on voltage level. */
clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++)
dcn3_14_soc.clock_limits[i] = clock_limits[i];
if (clk_table->num_entries) {
dcn3_14_soc.num_states = clk_table->num_entries;
}
}
if (max_dispclk_mhz) {
dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
dcn20_patch_bounding_box(dc, &dcn3_14_soc);
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
}
static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
/*
 * micro_sec_to_vert_lines() - convert a time in microseconds to a number of
 * vertical lines for a given timing
 *
 * @num_us: number of microseconds
 * @timing: CRTC timing the line time is derived from
 *
 * Return: number of vertical lines. If the time does not correspond to an
 * exact number of lines, the result is rounded up so that at least num_us
 * is guaranteed.
 */
static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
{
unsigned int num_lines = 0;
unsigned int lines_time_in_ns = 1000.0 *
(((float)timing->h_total * 1000.0) /
((float)timing->pix_clk_100hz / 10.0));
num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
return num_lines;
}
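/*
 * The vertical back porch is what remains of the blanking interval once the
 * front porch and sync width are removed:
 * v_back_porch = (v_total - v_active) - v_front_porch - v_sync_width
 */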
static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
{
unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
v_blank = timing->v_total - v_active;
v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
return v_back_porch;
}
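/*
 * dcn314_populate_dml_pipes_from_context_fpu: fill the DML pipe parameters
 * for each active stream. vblank_nom is clamped between
 * (v_sync_width + v_back_porch + 2) and both the VBlankNomDefaultUS
 * equivalent in lines and the 1023-line cap, then the DET allocation policy
 * is chosen based on pipe count, plane format and scaling.
 */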
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
dc_assert_fp_enabled();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
unsigned int num_lines = 0;
unsigned int v_back_porch = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing);
if (pipe->stream->adjust.v_total_min != 0)
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
v_back_porch = get_vertical_back_porch(timing);
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
// vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
// The + 2 is because:
// 1 -> VStartup_start should be 1 line before VSync
// 1 -> always reserve 1 line between the start of vblank and the vstartup signal
pipes[pipe_cnt].pipe.dest.vblank_nom =
max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
upscaled = true;
/* Apply HostVM policy - based on either the hypervisor being globally enabled or the rIOMMU being active */
if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
pipes[pipe_cnt].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled || dc->res_pool->hubbub->riommu_active;
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
* intermittently experienced depending on peak b/w requirements.
*/
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
pipes[pipe_cnt].dout.dsc_input_bpc = 8;
break;
case COLOR_DEPTH_101010:
pipes[pipe_cnt].dout.dsc_input_bpc = 10;
break;
case COLOR_DEPTH_121212:
pipes[pipe_cnt].dout.dsc_input_bpc = 12;
break;
default:
ASSERT(0);
break;
}
}
pipe_cnt++;
}
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state
&& pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
dc->config.enable_4to1MPC = true;
} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
}
} else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
&& dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
} else if (context->stream_count >= 3 && upscaled) {
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
pipe->stream->apply_seamless_boot_optimization) {
if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
break;
}
}
}
return pipe_cnt;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_314.h"
static bool CalculateBytePerPixelAnd256BBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
unsigned int *BytePerPixelY,
unsigned int *BytePerPixelC,
double *BytePerPixelDETY,
double *BytePerPixelDETC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC)
{
if (SourcePixelFormat == dm_444_64) {
*BytePerPixelDETY = 8;
*BytePerPixelDETC = 0;
*BytePerPixelY = 8;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 0;
*BytePerPixelY = 1;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 1;
*BytePerPixelY = 4;
*BytePerPixelC = 1;
} else if (SourcePixelFormat == dm_420_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 2;
*BytePerPixelY = 1;
*BytePerPixelC = 2;
} else if (SourcePixelFormat == dm_420_12) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 4;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
*BytePerPixelDETY = 4.0 / 3;
*BytePerPixelDETC = 8.0 / 3;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
}
if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8 || SourcePixelFormat == dm_mono_16
|| SourcePixelFormat == dm_mono_8 || SourcePixelFormat == dm_rgbe)) {
if (SurfaceTiling == dm_sw_linear)
*BlockHeight256BytesY = 1;
else if (SourcePixelFormat == dm_444_64)
*BlockHeight256BytesY = 4;
else if (SourcePixelFormat == dm_444_8)
*BlockHeight256BytesY = 16;
else
*BlockHeight256BytesY = 8;
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockHeight256BytesC = 0;
*BlockWidth256BytesC = 0;
} else {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
*BlockHeight256BytesC = 1;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 16;
} else if (SourcePixelFormat == dm_420_8) {
*BlockHeight256BytesY = 16;
*BlockHeight256BytesC = 8;
} else {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
return true;
}
static bool is_dual_plane(enum source_format_class source_format)
{
bool ret_val = false;
if ((source_format == dm_420_12) || (source_format == dm_420_8) || (source_format == dm_420_10) || (source_format == dm_rgbe_alpha))
ret_val = true;
return ret_val;
}
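/*
 * get_refcyc_per_delivery: convert the delivery time of one request into
 * reference clock cycles. For vratio <= 1 delivery is paced by the display
 * (recout width at the pixel clock, split across ODM segments when
 * combining); otherwise it is paced by the horizontal scaler pixel rate over
 * the delivery width.
 */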
static double get_refcyc_per_delivery(
struct display_mode_lib *mode_lib,
double refclk_freq_in_mhz,
double pclk_freq_in_mhz,
unsigned int odm_combine,
unsigned int recout_width,
unsigned int hactive,
double vratio,
double hscale_pixel_rate,
unsigned int delivery_width,
unsigned int req_per_swath_ub)
{
double refcyc_per_delivery = 0.0;
if (vratio <= 1.0) {
if (odm_combine)
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) ((unsigned int) odm_combine * 2)
* dml_min((double) recout_width, (double) hactive / ((unsigned int) odm_combine * 2)) / pclk_freq_in_mhz / (double) req_per_swath_ub;
else
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width / pclk_freq_in_mhz / (double) req_per_swath_ub;
} else {
refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width / (double) hscale_pixel_rate / (double) req_per_swath_ub;
}
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
dml_print("DML_DLG: %s: hscale_pixel_rate = %3.2f\n", __func__, hscale_pixel_rate);
dml_print("DML_DLG: %s: delivery_width = %d\n", __func__, delivery_width);
dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
#endif
return refcyc_per_delivery;
}
static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
{
if (tile_size == dm_256k_tile)
return (256 * 1024);
else if (tile_size == dm_64k_tile)
return (64 * 1024);
else
return (4 * 1024);
}
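/*
 * extract_rq_sizing_regs: the RQ sizing registers are log2-encoded. Chunk
 * sizes are stored relative to 1KB (log2(bytes) - 10), the minimum chunk and
 * minimum meta chunk sizes use their own offsets, and the PTE/meta PTE group
 * sizes are stored relative to 64B (log2(bytes) - 6).
 */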
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing)
{
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3;
if (rq_param->yuv420) {
extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3;
}
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between the luma and chroma chunk sizes?
// okay for now, as we are setting chunk_bytes to 8KB anyway
if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
}
rq_regs->prq_expansion_mode = 1;
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
// Note: detile_buf_plane1_addr is in unit of 1KB
if (rq_param->yuv420) {
if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0; // 2/3 to luma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr);
dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address);
dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
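/*
 * handle_det_buf_split: check whether the luma/chroma swaths fit in the
 * detile buffer with full 256B requests. If not, fall back to 128B requests
 * (halving the stored swath bytes) for one plane, or both if still needed,
 * and reduce the swath height accordingly. 10-bit 4:2:0 swaths are first
 * adjusted for their 3:2 packing.
 */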
static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n", __func__, full_swath_bytes_packed_c);
#endif
if (rq_param->yuv420_10bpc) {
full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2.0 / 3.0, 256, 1) + 256;
full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2.0 / 3.0, 256, 1) + 256;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d (3-2 packing)\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d (3-2 packing)\n", __func__, full_swath_bytes_packed_c);
#endif
}
if (rq_param->yuv420)
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
else
total_swath_bytes = 2 * full_swath_bytes_packed_l;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: total_swath_bytes = %0d\n", __func__, total_swath_bytes);
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
#endif
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
req128_l = 0;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else if (!rq_param->yuv420) {
req128_l = 1;
req128_c = 0;
swath_bytes_c = full_swath_bytes_packed_c;
swath_bytes_l = full_swath_bytes_packed_l / 2;
} else if ((double) full_swath_bytes_packed_l / (double) full_swath_bytes_packed_c < 1.5) {
req128_l = 0;
req128_c = 1;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c / 2;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_l = 1;
swath_bytes_l = full_swath_bytes_packed_l / 2;
}
} else {
req128_l = 1;
req128_c = 0;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
req128_c = 1;
swath_bytes_c = full_swath_bytes_packed_c / 2;
}
}
if (rq_param->yuv420)
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
else
total_swath_bytes = 2 * swath_bytes_l;
rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: total_swath_bytes = %0d\n", __func__, total_swath_bytes);
dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
#endif
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
} else {
unsigned int swath_height_l;
unsigned int swath_height_c;
if (!surf_vert) {
swath_height_l = rq_param->misc.rq_l.blk256_height;
swath_height_c = rq_param->misc.rq_c.blk256_height;
} else {
swath_height_l = rq_param->misc.rq_l.blk256_width;
swath_height_c = rq_param->misc.rq_c.blk256_width;
}
if (swath_height_l > 0)
log2_swath_height_l = dml_log2(swath_height_l);
if (req128_l && log2_swath_height_l > 0)
log2_swath_height_l -= 1;
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
if (req128_c && log2_swath_height_c > 0)
log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n", __func__, full_swath_bytes_packed_l);
dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n", __func__, full_swath_bytes_packed_c);
dml_print("DML_DLG: %s: swath_height luma = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
dml_print("DML_DLG: %s: swath_height chroma = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
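/*
 * get_meta_and_pte_attr: compute the request/row geometry for one plane:
 * swath width and requests per swath, the DCC meta request/row/chunk
 * parameters and meta PTE bytes per frame, and the dPTE request shape, row
 * height and group sizing for linear and tiled surfaces.
 */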
static void get_meta_and_pte_attr(
struct display_mode_lib *mode_lib,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
display_data_rq_sizing_params_st *rq_sizing_param,
unsigned int vp_width,
unsigned int vp_height,
unsigned int data_pitch,
unsigned int meta_pitch,
unsigned int source_format,
unsigned int tiling,
unsigned int macro_tile_size,
unsigned int source_scan,
unsigned int hostvm_enable,
unsigned int is_chroma,
unsigned int surface_height)
{
bool surf_linear = (tiling == dm_sw_linear);
bool surf_vert = (source_scan == dm_vert);
unsigned int bytes_per_element;
unsigned int bytes_per_element_y;
unsigned int bytes_per_element_c;
unsigned int blk256_width = 0;
unsigned int blk256_height = 0;
unsigned int blk256_width_y = 0;
unsigned int blk256_height_y = 0;
unsigned int blk256_width_c = 0;
unsigned int blk256_height_c = 0;
unsigned int log2_bytes_per_element;
unsigned int log2_blk256_width;
unsigned int log2_blk256_height;
unsigned int blk_bytes;
unsigned int log2_blk_bytes;
unsigned int log2_blk_height;
unsigned int log2_blk_width;
unsigned int log2_meta_req_bytes;
unsigned int log2_meta_req_height;
unsigned int log2_meta_req_width;
unsigned int meta_req_width;
unsigned int meta_req_height;
unsigned int log2_meta_row_height;
unsigned int meta_row_width_ub;
unsigned int log2_meta_chunk_bytes;
unsigned int log2_meta_chunk_height;
//full sized meta chunk width in unit of data elements
unsigned int log2_meta_chunk_width;
unsigned int log2_min_meta_chunk_bytes;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_blk_height;
unsigned int meta_surface_bytes;
unsigned int vmpg_bytes;
unsigned int meta_pte_req_per_frame_ub;
unsigned int meta_pte_bytes_per_frame_ub;
const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.gpuvm_min_page_size_bytes);
const bool dual_plane_en = is_dual_plane((enum source_format_class) (source_format));
const unsigned int dpte_buf_in_pte_reqs =
dual_plane_en ? (is_chroma ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma) : (mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma
+ mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
unsigned int log2_vmpg_height = 0;
unsigned int log2_vmpg_width = 0;
unsigned int log2_dpte_req_height_ptes = 0;
unsigned int log2_dpte_req_height = 0;
unsigned int log2_dpte_req_width = 0;
unsigned int log2_dpte_row_height_linear = 0;
unsigned int log2_dpte_row_height = 0;
unsigned int log2_dpte_group_width = 0;
unsigned int dpte_row_width_ub = 0;
unsigned int dpte_req_height = 0;
unsigned int dpte_req_width = 0;
unsigned int dpte_group_width = 0;
unsigned int log2_dpte_group_bytes = 0;
unsigned int log2_dpte_group_length = 0;
double byte_per_pixel_det_y;
double byte_per_pixel_det_c;
CalculateBytePerPixelAnd256BBlockSizes(
(enum source_format_class) (source_format),
(enum dm_swizzle_mode) (tiling),
&bytes_per_element_y,
&bytes_per_element_c,
&byte_per_pixel_det_y,
&byte_per_pixel_det_c,
&blk256_height_y,
&blk256_height_c,
&blk256_width_y,
&blk256_width_c);
if (!is_chroma) {
blk256_width = blk256_width_y;
blk256_height = blk256_height_y;
bytes_per_element = bytes_per_element_y;
} else {
blk256_width = blk256_width_c;
blk256_height = blk256_height_c;
bytes_per_element = bytes_per_element_c;
}
log2_bytes_per_element = dml_log2(bytes_per_element);
dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
log2_blk256_width = dml_log2((double) blk256_width);
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
// remember the log2 rules:
// "+" in log space is multiply
// "-" in log space is divide
// "/2" in log space is like a square root
// the block is vertically biased
if (tiling != dm_sw_linear)
log2_blk_height = log2_blk256_height + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
log2_blk_height = 0; // blk height of 1
log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
if (!surf_vert) {
unsigned int temp;
temp = dml_round_to_multiple(vp_width - 1, blk256_width, 1) + blk256_width;
if (data_pitch < blk256_width) {
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < blk256_width=%u\n", __func__, data_pitch, blk256_width);
} else {
if (temp > data_pitch) {
if (data_pitch >= vp_width)
temp = data_pitch;
else
dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < vp_width=%u\n", __func__, data_pitch, vp_width);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_width;
} else {
unsigned int temp;
temp = dml_round_to_multiple(vp_height - 1, blk256_height, 1) + blk256_height;
if (surface_height < blk256_height) {
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < blk256_height=%u\n", __func__, surface_height, blk256_height);
} else {
if (temp > surface_height) {
if (surface_height >= vp_height)
temp = surface_height;
else
dml_print("WARNING: DML_DLG: %s swath_size calculation ignored surface_height=%u < vp_height=%u\n", __func__, surface_height, vp_height);
}
}
rq_dlg_param->swath_width_ub = temp;
rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_height;
}
if (!surf_vert)
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height * bytes_per_element;
else
rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width * bytes_per_element;
rq_misc_param->blk256_height = blk256_height;
rq_misc_param->blk256_width = blk256_width;
// -------
// meta
// -------
log2_meta_req_bytes = 6; // a meta request is 64B and covers an 8x8-byte meta element
// each 64b meta request for dcn is 8x8 meta elements and
// a meta element covers one 256b block of the data surface.
log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 bytes, each byte represents 1 blk256
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
log2_meta_row_height = 0;
meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
if (!surf_vert) {
log2_meta_row_height = log2_meta_req_height;
meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1) + meta_req_width;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
} else {
log2_meta_row_height = log2_meta_req_width;
meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1) + meta_req_height;
rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
}
rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
log2_meta_chunk_height = log2_meta_row_height;
//full sized meta chunk width in unit of data elements
log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element - log2_meta_chunk_height;
log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
min_meta_chunk_width = 1 << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element - log2_meta_chunk_height);
meta_chunk_width = 1 << log2_meta_chunk_width;
meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
meta_row_remainder = meta_row_width_ub % meta_chunk_width;
meta_chunk_threshold = 0;
meta_blk_height = blk256_height * 64;
meta_surface_bytes = meta_pitch * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height) * bytes_per_element / 256;
vmpg_bytes = mode_lib->soc.gpuvm_min_page_size_bytes;
meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes, 8 * vmpg_bytes, 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n", __func__, meta_pte_req_per_frame_ub);
dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n", __func__, meta_pte_bytes_per_frame_ub);
if (!surf_vert)
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
else
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
if (meta_row_remainder <= meta_chunk_threshold)
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
else
rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
// ------
// dpte
// ------
if (surf_linear)
log2_vmpg_height = 0; // one line high
else
log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
// only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
if (surf_linear) { //one 64B PTE request returns 8 PTEs
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_vmpg_width + 3;
log2_dpte_req_height = 0;
} else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
//one 64B req gives 8x1 PTEs for 4KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
} else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
//two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
log2_dpte_req_height_ptes = 4;
log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
} else { //64KB page size and must 64KB tile block
//one 64B req gives 8x1 PTEs for 64KB tile
log2_dpte_req_height_ptes = 0;
log2_dpte_req_width = log2_blk_width + 3;
log2_dpte_req_height = log2_blk_height + 0;
}
// The dpte request dimensions in data elements are dpte_req_width x dpte_req_height
// log2_vmpg_width is how many elements one pte covers; now calculate how many a 64b pte request covers
// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
//log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
//log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
dpte_req_height = 1 << log2_dpte_req_height;
dpte_req_width = 1 << log2_dpte_req_width;
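/*
 * Illustrative example, assuming a hypothetical 32bpp surface (an 8x8
 * 256B block): in the 64KB-tile, 4KB-page branch above this gives
 * log2_dpte_req_width = 3 + 4 = 7 and log2_dpte_req_height = 3 + 4 = 7,
 * i.e. a 128x128-element region = 64KB, covered by 16 PTEs returned by
 * two 64B PTE requests of 8 PTEs each.
 */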
// calculate pitch dpte row buffer can hold
// round the result down to a power of two.
if (surf_linear) {
unsigned int dpte_row_height;
log2_dpte_row_height_linear = dml_floor(dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch), 1);
dml_print("DML_DLG: %s: is_chroma = %d\n", __func__, is_chroma);
dml_print("DML_DLG: %s: dpte_buf_in_pte_reqs = %d\n", __func__, dpte_buf_in_pte_reqs);
dml_print("DML_DLG: %s: log2_dpte_row_height_linear = %d\n", __func__, log2_dpte_row_height_linear);
ASSERT(log2_dpte_row_height_linear >= 3);
if (log2_dpte_row_height_linear > 7)
log2_dpte_row_height_linear = 7;
log2_dpte_row_height = log2_dpte_row_height_linear;
// For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
// the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
dpte_row_height = 1 << log2_dpte_row_height;
dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1, dpte_req_width, 1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
// the upper bound of the dpte_row_width without dependency on viewport position follows.
// for tiled mode, the row height is the same as the req height and the row stores up to the vp size upper bound
if (!surf_vert) {
log2_dpte_row_height = log2_dpte_req_height;
dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1) + dpte_req_width;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
} else {
log2_dpte_row_height = (log2_blk_width < log2_dpte_req_width) ? log2_blk_width : log2_dpte_req_width;
dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1) + dpte_req_height;
rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
}
}
if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
else
rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced; in this case there will be a page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
}
//since the PTE request size is 64 bytes, the number of data PTE requests per full-sized group is as follows.
log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64B requests
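/*
 * Example: in the 2048-byte group case above, log2_dpte_group_bytes = 11
 * and log2_dpte_group_length = 5, i.e. 32 64B PTE requests per
 * full-sized group.
 */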
// full sized data pte group width in elements
if (!surf_vert)
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
else
log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
//But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
log2_dpte_group_width = log2_dpte_group_width - 1;
dpte_group_width = 1 << log2_dpte_group_width;
// since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
// the upper bound for the dpte groups per row is as follows.
rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width, 1);
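/*
 * Illustrative numbers: dpte_row_width_ub = 1536 elements with
 * dpte_group_width = 512 gives ceil(1536 / 512) = 3 groups per row,
 * while 1600 elements would round up to 4 groups.
 */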
}
static void get_surf_rq_param(
struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
unsigned int ppe = mode_422 ? 2 : 1;
// FIXME check if ppe applies to both luma and chroma in the 422 case
if (is_chroma || is_alpha) {
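/* the chroma/alpha pass uses the *_c viewport, data pitch and meta
 * pitch fields, and models the plane at half the luma surface height
 */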
vp_width = pipe_param->src.viewport_width_c / ppe;
vp_height = pipe_param->src.viewport_height_c;
data_pitch = pipe_param->src.data_pitch_c;
meta_pitch = pipe_param->src.meta_pitch_c;
surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
vp_width = pipe_param->src.viewport_width / ppe;
vp_height = pipe_param->src.viewport_height;
data_pitch = pipe_param->src.data_pitch;
meta_pitch = pipe_param->src.meta_pitch;
surface_height = pipe_param->src.surface_height_y;
}
if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_odm;
unsigned int src_hactive_odm;
access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontally or vertically accessed
hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2);
if (is_chroma) {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
vp_width = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
} else {
vp_height = dml_min(full_src_vp_width, src_hactive_odm);
dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
}
dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
dml_print("DML_DLG: %s: hactive_odm = %d\n", __func__, hactive_odm);
dml_print("DML_DLG: %s: src_hactive_odm = %d\n", __func__, src_hactive_odm);
}
rq_sizing_param->chunk_bytes = 8192;
if (is_alpha)
rq_sizing_param->chunk_bytes = 4096;
if (rq_sizing_param->chunk_bytes == 64 * 1024)
rq_sizing_param->min_chunk_bytes = 0;
else
rq_sizing_param->min_chunk_bytes = 1024;
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
get_meta_and_pte_attr(
mode_lib,
rq_dlg_param,
rq_misc_param,
rq_sizing_param,
vp_width,
vp_height,
data_pitch,
meta_pitch,
pipe_param->src.source_format,
pipe_param->src.sw_mode,
pipe_param->src.macro_tile_size,
pipe_param->src.source_scan,
pipe_param->src.hostvm,
is_chroma,
surface_height);
}
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param)
{
// get param for luma surface
rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha
|| pipe_param->src.source_format == dm_420_12;
rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0;
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0);
if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha);
}
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
print__rq_params_st(mode_lib, rq_param);
}
void dml314_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
extract_rq_regs(mode_lib, rq_regs, &rq_param);
print__rq_regs_st(mode_lib, rq_regs);
}
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
double refclk_freq_in_mhz,
double ref_freq_to_pix_freq,
double hscale_pixel_rate_l,
double hscl_ratio,
double vratio_pre_l,
double vratio_l,
unsigned int cur_width,
enum cursor_bpp cur_bpp)
{
unsigned int cur_src_width = cur_width;
unsigned int cur_req_size = 0;
unsigned int cur_req_width = 0;
double cur_width_ub = 0.0;
double cur_req_per_width = 0.0;
double hactive_cur = 0.0;
ASSERT(cur_src_width <= 256);
*refcyc_per_req_delivery_pre_cur = 0.0;
*refcyc_per_req_delivery_cur = 0.0;
if (cur_src_width > 0) {
unsigned int cur_bit_per_pixel = 0;
if (cur_bpp == dm_cur_2bit) {
cur_req_size = 64; // byte
cur_bit_per_pixel = 2;
} else { // 32bit
cur_bit_per_pixel = 32;
if (cur_src_width >= 1 && cur_src_width <= 16)
cur_req_size = 64;
else if (cur_src_width >= 17 && cur_src_width <= 31)
cur_req_size = 128;
else
cur_req_size = 256;
}
cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
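/*
 * Illustrative numbers: a 2-bit cursor always uses 64B requests, so one
 * request covers 64 / (2/8) = 256 pixels. A hypothetical 32-bit,
 * 64-pixel-wide cursor uses 256B requests, giving cur_req_width =
 * 256 / 4 = 64, cur_width_ub = 64 and cur_req_per_width = 1.
 */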
hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0)
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq / (double) cur_req_per_width;
else
*refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz * (double) cur_src_width / hscale_pixel_rate_l / (double) cur_req_per_width;
ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
if (vratio_l <= 1.0)
*refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq / (double) cur_req_per_width;
else
*refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz * (double) cur_src_width / hscale_pixel_rate_l / (double) cur_req_per_width;
dml_print("DML_DLG: %s: cur_req_width = %d\n", __func__, cur_req_width);
dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n", __func__, cur_width_ub);
dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n", __func__, cur_req_per_width);
dml_print("DML_DLG: %s: hactive_cur = %3.2f\n", __func__, hactive_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n", __func__, *refcyc_per_req_delivery_pre_cur);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n", __func__, *refcyc_per_req_delivery_cur);
ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
}
}
// Note: currently taken in as-is.
// It would be nice to decouple this code from the HW register implementation and to extract the code that is repeated for luma and chroma.
static void dml_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
const display_rq_dlg_params_st *rq_dlg_param,
const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX];
// -------------------------
// Section 1.15.2.1: OTG dependent Params
// -------------------------
// Timing
unsigned int htotal = dst->htotal;
unsigned int hblank_end = dst->hblank_end;
unsigned int vblank_start = dst->vblank_start;
unsigned int vblank_end = dst->vblank_end;
double dppclk_freq_in_mhz = clks->dppclk_mhz;
double refclk_freq_in_mhz = clks->refclk_mhz;
double pclk_freq_in_mhz = dst->pixel_rate_mhz;
bool interlaced = dst->interlaced;
double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
double min_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
unsigned int vp_height_c;
unsigned int vp_width_c;
// Scaling
unsigned int htaps_l;
unsigned int htaps_c;
double hratio_l;
double hratio_c;
double vratio_l;
double vratio_c;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
unsigned int swath_width_ub_c;
unsigned int dpte_groups_per_row_ub_c;
unsigned int meta_chunks_per_row_ub_l;
unsigned int meta_chunks_per_row_ub_c;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
double dst_y_prefetch;
double dst_y_per_vm_vblank;
double dst_y_per_row_vblank;
double dst_y_per_vm_flip;
double dst_y_per_row_flip;
double max_dst_y_per_vm_vblank;
double max_dst_y_per_row_vblank;
double vratio_pre_l;
double vratio_pre_c;
unsigned int req_per_swath_ub_l;
unsigned int req_per_swath_ub_c;
unsigned int meta_row_height_l;
unsigned int meta_row_height_c;
unsigned int swath_width_pixels_ub_l;
unsigned int swath_width_pixels_ub_c;
unsigned int scaler_rec_in_width_l;
unsigned int scaler_rec_in_width_c;
unsigned int dpte_row_height_l;
unsigned int dpte_row_height_c;
double hscale_pixel_rate_l;
double hscale_pixel_rate_c;
double min_hratio_fact_l;
double min_hratio_fact_c;
double refcyc_per_line_delivery_pre_l;
double refcyc_per_line_delivery_pre_c;
double refcyc_per_line_delivery_l;
double refcyc_per_line_delivery_c;
double refcyc_per_req_delivery_pre_l;
double refcyc_per_req_delivery_pre_c;
double refcyc_per_req_delivery_l;
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
double refcyc_per_req_delivery_cur1;
unsigned int vba__min_dst_y_next_start = get_min_dst_y_next_start(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA
unsigned int vba__vready_after_vcount0 = get_vready_at_or_after_vsync(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
float vba__refcyc_per_line_delivery_pre_l = get_refcyc_per_line_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_line_delivery_l = get_refcyc_per_line_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced); ASSERT(ref_freq_to_pix_freq < 4.0);
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal * dml_pow(2, 8));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
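/*
 * Register encoding example with hypothetical values: a
 * ref_freq_to_pix_freq of 0.16 is programmed with 19 fractional bits as
 * 0.16 * 2^19 ~= 83886, and with htotal = 2200 refcyc_per_htotal becomes
 * 0.16 * 2200 * 2^8 = 90112.
 */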
//set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start_us =
(vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
dml_print("DML_DLG: %s: min_dst_y_next_start = 0x%0x\n", __func__, disp_dlg_regs->min_dst_y_next_start);
dml_print("DML_DLG: %s: dlg_vblank_start = 0x%0x\n", __func__, dlg_vblank_start);
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
dml_print("DML_DLG: %s: vba__min_dst_y_next_start = 0x%0x\n", __func__, vba__min_dst_y_next_start);
//old_impl_vs_vba_impl("min_dst_y_next_start", dlg_vblank_start, vba__min_dst_y_next_start);
// -------------------------
// Section 1.15.2.2: Prefetch, Active and TTU
// -------------------------
// Prefetch Calc
// Source
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
mode_422 = 0;
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontally or vertically accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
vp_height_c = src->viewport_height_c;
vp_width_c = src->viewport_width_c;
// Scaling
htaps_l = taps->htaps;
htaps_c = taps->htaps_c;
hratio_l = scl->hscl_ratio;
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
vstartup_start = dst->vstartup_start;
if (interlaced) {
if (vstartup_start / 2.0 - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal <= vblank_end / 2.0)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
} else {
if (vstartup_start - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal <= vblank_end)
disp_dlg_regs->vready_after_vcount0 = 1;
else
disp_dlg_regs->vready_after_vcount0 = 0;
}
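/*
 * Example with hypothetical timing: vstartup_start = 26,
 * vready_offset + vupdate_width + vupdate_offset = 16, htotal = 2200 and
 * vblank_end = 36 gives 26 - 16/2200 = 25.99 <= 36 in the non-interlaced
 * case, so vready_after_vcount0 = 1.
 */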
dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
dml_print("DML_DLG: %s: vba__vready_after_vcount0 = %d\n", __func__, vba__vready_after_vcount0);
//old_impl_vs_vba_impl("vready_after_vcount0", disp_dlg_regs->vready_after_vcount0, vba__vready_after_vcount0);
if (interlaced)
vstartup_start = vstartup_start / 2;
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
// do some adjustment on the dst_after scaler to account for odm combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler);
dml_print("DML_DLG: %s: input dst_y_after_scaler = %d\n", __func__, dst_y_after_scaler);
// need to figure out which side of odm combine we're in
if (dst->odm_combine) {
// figure out which pipes go together
bool visited[DC__NUM_PIPES__MAX];
unsigned int i, j, k;
for (k = 0; k < num_pipes; ++k) {
visited[k] = false;
pipe_index_in_combine[k] = 0;
}
for (i = 0; i < num_pipes; i++) {
if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
unsigned int grp_idx = 0;
for (j = i; j < num_pipes; j++) {
if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp && e2e_pipe_param[j].pipe.src.is_hsplit && !visited[j]) {
pipe_index_in_combine[j] = grp_idx;
dml_print("DML_DLG: %s: pipe[%d] is in grp %d idx %d\n", __func__, j, grp, grp_idx);
grp_idx++;
visited[j] = true;
}
}
}
}
}
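/*
 * Example: two pipes (0 and 1) that are both is_hsplit members of
 * hsplit_grp 0 end up with pipe_index_in_combine[0] = 0 and
 * pipe_index_in_combine[1] = 1; a 4to1 combine numbers its four pipes
 * 0..3 in pipe-array order.
 */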
if (dst->odm_combine == dm_odm_combine_mode_disabled) {
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end * ref_freq_to_pix_freq);
} else {
unsigned int odm_combine_factor = (dst->odm_combine == dm_odm_combine_mode_2to1 ? 2 : 4); // TODO: We should really check that 4to1 is supported before setting it to 4
unsigned int odm_pipe_index = pipe_index_in_combine[pipe_idx];
disp_dlg_regs->refcyc_h_blank_end = (unsigned int) (((double) hblank_end + odm_pipe_index * (double) dst->hactive / odm_combine_factor) * ref_freq_to_pix_freq);
}
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
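/*
 * Illustrative ODM numbers: hblank_end = 200, hactive = 3840, 2to1
 * combine, odm_pipe_index = 1 and ref_freq_to_pix_freq = 0.16 give
 * refcyc_h_blank_end = (200 + 1 * 3840 / 2) * 0.16 = 339 (truncated).
 */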
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
dml_print("DML_DLG: %s: dst_x_after_scaler[%d] = %d\n", __func__, pipe_idx, dst_x_after_scaler);
dml_print("DML_DLG: %s: dst_y_after_scaler[%d] = %d\n", __func__, pipe_idx, dst_y_after_scaler);
dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
max_dst_y_per_vm_vblank = 32.0; //U5.2
max_dst_y_per_row_vblank = 16.0; //U4.2
// magic!
if (htotal <= 75) {
max_dst_y_per_vm_vblank = 100.0;
max_dst_y_per_row_vblank = 100.0;
}
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dml_print("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, vratio_pre_l);
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
// Active
req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
swath_width_pixels_ub_c = swath_width_ub_c * 2;
} else {
swath_width_pixels_ub_l = swath_width_ub_l * 1;
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
hscale_pixel_rate_l = 0.;
hscale_pixel_rate_c = 0.;
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l * 2.0;
} else {
if (hratio_l > 4.0)
min_hratio_fact_l = 4.0;
else
min_hratio_fact_l = hratio_l;
}
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
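/*
 * Examples of the factor above with hypothetical ratios: hratio_l = 0.8
 * gives min_hratio_fact_l = 2.0; hratio_l = 1.5 with 6 taps gives 3.0;
 * hratio_l = 3.0 with 8 taps gives 3.0; anything that would exceed 4.0 is
 * clamped to 4.0. With a 600 MHz DPPCLK and a factor of 3.0,
 * hscale_pixel_rate_l = 1800 (same MHz units as dppclk_freq_in_mhz).
 */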
dml_print("DML_DLG: %s: hratio_l = %3.2f\n", __func__, hratio_l);
dml_print("DML_DLG: %s: min_hratio_fact_l = %3.2f\n", __func__, min_hratio_fact_l);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n", __func__, hscale_pixel_rate_l);
if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c * 2.0;
} else {
if (hratio_c > 4.0)
min_hratio_fact_c = 4.0;
else
min_hratio_fact_c = hratio_c;
}
hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
refcyc_per_line_delivery_pre_l = 0.;
refcyc_per_line_delivery_pre_c = 0.;
refcyc_per_line_delivery_l = 0.;
refcyc_per_line_delivery_c = 0.;
refcyc_per_req_delivery_pre_l = 0.;
refcyc_per_req_delivery_pre_c = 0.;
refcyc_per_req_delivery_l = 0.;
refcyc_per_req_delivery_c = 0.;
full_recout_width = 0;
// In ODM
if (src->is_hsplit) {
// This "hack" is only allowed (and valid) for MPC combine. In ODM
// combine, you MUST specify the full_recout_width...according to Oswin
if (dst->full_recout_width == 0 && !dst->odm_combine) {
dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n", __func__);
full_recout_width = dst->recout_width * 2; // assume half split for dcn1
} else
full_recout_width = dst->full_recout_width;
} else
full_recout_width = dst->recout_width;
// As of DCN2, mpc_combine and odm_combine are mutually exclusive
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
refcyc_per_line_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
swath_width_pixels_ub_l,
1); // per line
dml_print("DML_DLG: %s: full_recout_width = %d\n", __func__, full_recout_width);
dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n", __func__, hscale_pixel_rate_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, refcyc_per_line_delivery_l);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, vba__refcyc_per_line_delivery_pre_l);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_l = %3.2f\n", __func__, vba__refcyc_per_line_delivery_l);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_pre_l", refcyc_per_line_delivery_pre_l, vba__refcyc_per_line_delivery_pre_l);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_l", refcyc_per_line_delivery_l, vba__refcyc_per_line_delivery_l);
if (dual_plane) {
float vba__refcyc_per_line_delivery_pre_c = get_refcyc_per_line_delivery_pre_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_line_delivery_c = get_refcyc_per_line_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
refcyc_per_line_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
swath_width_pixels_ub_c,
1); // per line
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, refcyc_per_line_delivery_c);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, vba__refcyc_per_line_delivery_pre_c);
dml_print("DML_DLG: %s: vba__refcyc_per_line_delivery_c = %3.2f\n", __func__, vba__refcyc_per_line_delivery_c);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_pre_c", refcyc_per_line_delivery_pre_c, vba__refcyc_per_line_delivery_pre_c);
//old_impl_vs_vba_impl("refcyc_per_line_delivery_c", refcyc_per_line_delivery_c, vba__refcyc_per_line_delivery_c);
}
if (src->dynamic_metadata_enable && src->gpuvm)
disp_dlg_regs->refcyc_per_vm_dmdata = get_refcyc_per_vm_dmdata_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->dmdata_dl_delta = get_dmdata_dl_delta_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
// TTU - Luma / Chroma
if (access_dir) { // vertical access
scaler_rec_in_width_l = vp_height_l;
scaler_rec_in_width_c = vp_height_c;
} else {
scaler_rec_in_width_l = vp_width_l;
scaler_rec_in_width_c = vp_width_c;
}
refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
refcyc_per_req_delivery_l = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_l,
hscale_pixel_rate_l,
scaler_rec_in_width_l,
req_per_swath_ub_l); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, refcyc_per_req_delivery_l);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_l);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_l = %3.2f\n", __func__, vba__refcyc_per_req_delivery_l);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_l", refcyc_per_req_delivery_pre_l, vba__refcyc_per_req_delivery_pre_l);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_l", refcyc_per_req_delivery_l, vba__refcyc_per_req_delivery_l);
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
if (dual_plane) {
float vba__refcyc_per_req_delivery_pre_c = get_refcyc_per_req_delivery_pre_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_c = get_refcyc_per_req_delivery_c_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_pre_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
refcyc_per_req_delivery_c = get_refcyc_per_delivery(
mode_lib,
refclk_freq_in_mhz,
pclk_freq_in_mhz,
dst->odm_combine,
full_recout_width,
dst->hactive,
vratio_c,
hscale_pixel_rate_c,
scaler_rec_in_width_c,
req_per_swath_ub_c); // per req
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, refcyc_per_req_delivery_c);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_c);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_c = %3.2f\n", __func__, vba__refcyc_per_req_delivery_c);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_c", refcyc_per_req_delivery_pre_c, vba__refcyc_per_req_delivery_pre_c);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_c", refcyc_per_req_delivery_c, vba__refcyc_per_req_delivery_c);
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
ASSERT(src->num_cursors <= 1);
if (src->num_cursors > 0) {
float vba__refcyc_per_req_delivery_pre_cur0;
float vba__refcyc_per_req_delivery_cur0;
calculate_ttu_cursor(
mode_lib,
&refcyc_per_req_delivery_pre_cur0,
&refcyc_per_req_delivery_cur0,
refclk_freq_in_mhz,
ref_freq_to_pix_freq,
hscale_pixel_rate_l,
scl->hscl_ratio,
vratio_pre_l,
vratio_l,
src->cur0_src_width,
(enum cursor_bpp) (src->cur0_bpp));
vba__refcyc_per_req_delivery_pre_cur0 = get_refcyc_per_cursor_req_delivery_pre_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
vba__refcyc_per_req_delivery_cur0 = get_refcyc_per_cursor_req_delivery_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_pre_cur0);
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_cur0);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_pre_cur0 = %3.2f\n", __func__, vba__refcyc_per_req_delivery_pre_cur0);
dml_print("DML_DLG: %s: vba__refcyc_per_req_delivery_cur0 = %3.2f\n", __func__, vba__refcyc_per_req_delivery_cur0);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_pre_cur0", refcyc_per_req_delivery_pre_cur0, vba__refcyc_per_req_delivery_pre_cur0);
//old_impl_vs_vba_impl("refcyc_per_req_delivery_cur0", refcyc_per_req_delivery_cur0, vba__refcyc_per_req_delivery_cur0);
}
refcyc_per_req_delivery_pre_cur1 = 0.0;
refcyc_per_req_delivery_cur1 = 0.0;
// TTU - Misc
// all hard-coded
// Assignment to register structures
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
ASSERT(disp_dlg_regs->dst_y_after_scaler < 8);
disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
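/*
 * Fixed-point examples with hypothetical values: dst_y_prefetch = 5.5 is
 * programmed with 2 fractional bits as 5.5 * 2^2 = 22,
 * dst_y_per_vm_vblank = 2.75 as 11, and vratio_pre_l = 1.25 with 19
 * fractional bits as 1.25 * 2^19 = 655360.
 */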
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)dml_pow(2, 13));
}
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
}
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // From VBA
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
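/*
 * Example of the VM request encoding with hypothetical values: 1.2 us at
 * a 400 MHz refclk is 480 refclk cycles, programmed with 10 extra
 * fractional bits as 480 * 2^10 = 491520 for refcyc_per_vm_req_vblank;
 * the group registers carry plain refclk cycles, and all four fields
 * saturate at 2^23 - 1 = 8388607 above.
 */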
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l / (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
if (dual_plane) {
disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c / (double) vratio_c * dml_pow(2, 2));
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
dml_print(
"DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
__func__,
disp_dlg_regs->dst_y_per_pte_row_nom_c,
(unsigned int) dml_pow(2, 17) - 1);
}
}
disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l / (double) vratio_l * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->dst_y_per_meta_row_nom_c = (unsigned int) ((double) meta_row_height_c / (double) vratio_c * dml_pow(2, 2));
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_c < (unsigned int)dml_pow(2, 17));
disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_l);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
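/*
 * Illustrative nominal-rate numbers: dpte_row_height_l = 8, vratio_l =
 * 1.0, htotal = 2200, ref_freq_to_pix_freq = 0.16 and
 * dpte_groups_per_row_ub_l = 4 give refcyc_per_pte_group_nom_l =
 * 8 * 2200 * 0.16 / 4 = 704.
 */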
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
// TODO: Is this the right calculation? Does htotal need to be halved?
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (unsigned int) ((double) meta_row_height_c / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
/ (double) meta_chunks_per_row_ub_c);
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
}
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l, 1);
disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c, 1);
disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c, 1);
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
disp_dlg_regs->dst_y_offset_cur0 = 0;
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 = (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1 * dml_pow(2, 10));
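/*
 * TTU encoding example with a hypothetical value: refcyc_per_req_delivery_l
 * of 3.5 refclk cycles per request is programmed with 10 fractional bits as
 * 3.5 * 2^10 = 3584; the earlier ASSERTs bound the un-encoded value to
 * below 2^13.
 */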
disp_ttu_regs->qos_level_low_wm = 0;
ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal * ref_freq_to_pix_freq);
ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
disp_ttu_regs->qos_level_flip = 14;
disp_ttu_regs->qos_level_fixed_l = 8;
disp_ttu_regs->qos_level_fixed_c = 8;
disp_ttu_regs->qos_level_fixed_cur0 = 8;
disp_ttu_regs->qos_ramp_disable_l = 0;
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
print__ttu_regs_st(mode_lib, disp_ttu_regs);
print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml314_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool ignore_viewport_pos,
const bool immediate_flip_support)
{
display_rq_params_st rq_param = {0};
display_dlg_sys_params_st dlg_sys_param = {0};
// Get watermark and Tex.
dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes);
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
&rq_param.dlg,
&dlg_sys_param,
cstate_en,
pstate_en,
vm_en,
ignore_viewport_pos,
immediate_flip_support);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#define UNIT_TEST 0
#if !UNIT_TEST
#include "dc.h"
#endif
#include "../display_mode_lib.h"
#include "display_mode_vba_314.h"
#include "../dml_inline_defs.h"
/*
* NOTE:
* This file is gcc-parsable HW gospel, coming straight from HW engineers.
*
* It doesn't adhere to Linux kernel style and sometimes will do things in odd
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN314_MAX_DSC_IMAGE_WIDTH 5184
#define DCN314_MAX_FMT_420_BUFFER_WIDTH 4096
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
// Move these to ip parameters/constant
// At which vstartup the DML start to try if the mode can be supported
#define __DML_VBA_MIN_VSTARTUP__ 9
// Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_ARB_TO_RET_DELAY__ (7 + 95)
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
double DCFCLKDeepSleep;
unsigned int DPPPerPlane;
bool ScalerEnabled;
double VRatio;
double VRatioChroma;
enum scan_direction_class SourceScan;
unsigned int BlockWidth256BytesY;
unsigned int BlockHeight256BytesY;
unsigned int BlockWidth256BytesC;
unsigned int BlockHeight256BytesC;
unsigned int InterlaceEnable;
unsigned int NumberOfCursors;
unsigned int VBlank;
unsigned int HTotal;
unsigned int DCCEnable;
bool ODMCombineIsEnabled;
enum source_format_class SourcePixelFormat;
int BytePerPixelY;
int BytePerPixelC;
bool ProgressiveToInterlaceUnitInOPP;
} Pipe;
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
static bool CalculateBytePerPixelAnd256BBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
unsigned int *BytePerPixelY,
unsigned int *BytePerPixelC,
double *BytePerPixelDETY,
double *BytePerPixelDETC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(struct display_mode_lib *mode_lib);
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output);
static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output);
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
double *TSetup,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma);
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath);
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame);
static double CalculateTWait(unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatency, double SREnterPlusExitTime);
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw);
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
int WritebackDestinationWidth,
int WritebackDestinationHeight,
int WritebackSourceHeight,
unsigned int HTotal);
static void CalculateVupdateAndDynamicMetadataParameters(
int MaxInterDCNTileRepeaters,
double DPPCLK,
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
int HTotal,
int VBlank,
int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *TSetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
double DCFCLK,
double ReturnBW,
double UrgentLatency,
double ExtraLatency,
double SOCCLK,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep);
static void CalculateUrgentBurstFactor(
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding);
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][DC__NUM_CURSOR__MAX],
unsigned int CursorBPP[][DC__NUM_CURSOR__MAX],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[]);
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[]);
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[]);
static void CalculateStutterEfficiency(
struct display_mode_lib *mode_lib,
int CompressedBufferSizeInkByte,
bool UnboundedRequestEnabled,
int ConfigReturnBufferSizeInKByte,
int MetaFIFOSizeInKEntries,
int ZeroSizeBufferEntries,
int NumberOfActivePlanes,
int ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double COMPBUF_RESERVED_SPACE_64B,
double COMPBUF_RESERVED_SPACE_ZS,
double SRExitTime,
double SRExitZ8Time,
bool SynchronizedVBlank,
double Z8StutterEnterPlusExitWatermark,
double StutterEnterPlusExitWatermark,
bool ProgressiveToInterlaceUnitInOPP,
bool Interlace[],
double MinTTUVBlank[],
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double NetDCCRateLuma[],
double NetDCCRateChroma[],
double DCCFractionOfZeroSizeRequestsLuma[],
double DCCFractionOfZeroSizeRequestsChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
int *NumberOfStutterBurstsPerFrame,
double *Z8StutterEfficiencyNotIncludingVBlank,
double *Z8StutterEfficiency,
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod);
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport);
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[]);
static double CalculateExtraLatency(
int RoundTripPingLatencyCycles,
int ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateExtraLatencyBytes(
int ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels);
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClockSingle);
static void CalculateUnboundedRequestAndCompressedBufferSize(
unsigned int DETBufferSizeInKByte,
int ConfigReturnBufferSizeInKByte,
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
int TotalActiveDPP,
bool NoChromaPlanes,
int MaxNumDPP,
int CompressedBufferSegmentSizeInkByteFinal,
enum output_encoder_class *Output,
bool *UnboundedRequestEnabled,
int *CompressedBufferSizeInkByte);
static bool UnboundedRequest(
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
int TotalNumberOfActiveDPP,
bool NoChroma,
enum output_encoder_class Output);
static unsigned int CalculateMaxVStartup(
unsigned int VTotal,
unsigned int VActive,
unsigned int VBlankNom,
unsigned int HTotal,
double PixelClock,
bool ProgressiveTointerlaceUnitinOPP,
bool Interlace,
unsigned int VBlankNomDefaultUS,
double WritebackDelayTime);
void dml314_recalculate(struct display_mode_lib *mode_lib)
{
ModeSupportAndSystemConfiguration(mode_lib);
PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
DisplayPipeConfiguration(mode_lib);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Calling DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation\n", __func__);
#endif
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
}
static unsigned int dscceComputeDelay(
unsigned int bpc,
double BPP,
unsigned int sliceWidth,
unsigned int numSlices,
enum output_format_class pixelFormat,
enum output_encoder_class Output)
{
// valid bpc = source bits per component in the set of {8, 10, 12}
// valid bpp = increments of 1/16 of a bit
// min = 6/7/8 in N420/N422/444, respectively
// max = such that compression is 1:1
//valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
//valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
// fixed value
unsigned int rcModelSize = 8192;
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock = 0, lstall, D, initalXmitDelay, w, s, ix, wx, P, l0, a, ax, L, Delay, pixels;
if (pixelFormat == dm_420)
pixelsPerClock = 2;
else if (pixelFormat == dm_444)
pixelsPerClock = 1;
else if (pixelFormat == dm_n422)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
else
pixelsPerClock = 1;
//initial transmit delay as per PPS
initalXmitDelay = dml_round(rcModelSize / 2.0 / BPP / pixelsPerClock);
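// e.g. at BPP = 8 in 4:4:4 (1 pixel/clock): dml_round(8192 / 2.0 / 8 / 1) = 512 cycles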
//compute ssm delay
if (bpc == 8)
D = 81;
else if (bpc == 10)
D = 89;
else
D = 113;
//divide by pixel per cycle to compute slice width as seen by DSC
w = sliceWidth / pixelsPerClock;
//422 mode has an additional cycle of delay
if (pixelFormat == dm_420 || pixelFormat == dm_444 || pixelFormat == dm_n422)
s = 0;
else
s = 1;
//main calculation for the dscce
ix = initalXmitDelay + 45;
wx = (w + 2) / 3;
P = 3 * wx - w;
l0 = ix / w;
a = ix + P * l0;
ax = (a + 2) / 3 + D + 6 + 1;
L = (ax + wx - 1) / wx;
if ((ix % w) == 0 && P != 0)
lstall = 1;
else
lstall = 0;
Delay = L * wx * (numSlices - 1) + ax + s + lstall + 22;
//dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
pixels = Delay * 3 * pixelsPerClock;
return pixels;
}
static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output)
{
unsigned int Delay = 0;
if (pixelFormat == dm_420) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc gets pixels every other cycle
Delay = Delay + 2;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc gets pixels every other cycle
Delay = Delay + 13;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc gets pixels every other cycle
Delay = Delay + 3;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else if (pixelFormat == dm_n422) {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 1;
// dscc - input deserializer
Delay = Delay + 5;
// dscc - input cdc fifo
Delay = Delay + 25;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 10;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output serializer
Delay = Delay + 1;
// sft
Delay = Delay + 1;
} else {
// sfr
Delay = Delay + 2;
// dsccif
Delay = Delay + 0;
// dscc - input deserializer
Delay = Delay + 3;
// dscc - input cdc fifo
Delay = Delay + 12;
// dscc - cdc uncertainty
Delay = Delay + 2;
// dscc - output cdc fifo
Delay = Delay + 7;
// dscc - output serializer
Delay = Delay + 1;
// dscc - cdc uncertainty
Delay = Delay + 2;
// sft
Delay = Delay + 1;
}
return Delay;
}
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
unsigned int PDEAndMetaPTEBytesFrame,
unsigned int MetaRowByte,
unsigned int PixelPTEBytesPerRow,
double PrefetchSourceLinesY,
unsigned int SwathWidthY,
double VInitPreFillY,
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
double *DestinationLinesToRequestRowInVBlank,
double *VRatioPrefetchY,
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
double *TSetup,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
double Tr0_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
double Tr0_trips;
double Tvm_trips_rounded;
double Tr0_trips_rounded;
double Lsw_oto;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
double prefetch_sw_bytes;
double bytes_pp;
double dep_bytes;
int max_vratio_pre = 4;
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
double max_Tsw = 0;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable=%d HostVMEnable=%d HostVMInefficiencyFactor=%f\n", __func__, GPUVMEnable, HostVMEnable, HostVMInefficiencyFactor);
#endif
CalculateVupdateAndDynamicMetadataParameters(
MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
DynamicMetadataTransmittedBytes,
DynamicMetadataLinesBeforeActiveRequired,
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
&Tdmbf,
&Tdmec,
&Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
#ifdef __DML_VBA_ALLOW_DELTA__
if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
#else
if (DynamicMetadataVMEnabled == true) {
#endif
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
} else {
*Tdmdl = TWait + UrgentExtraLatency;
}
#ifdef __DML_VBA_ALLOW_DELTA__
if (DynamicMetadataEnable == false) {
*Tdmdl = 0.0;
}
#endif
if (DynamicMetadataEnable == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK + DSCDelay;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: DPPCLK: %f\n", __func__, myPipe->DPPCLK);
dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->DISPCLK);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: ODMCombineIsEnabled: %d\n", __func__, myPipe->ODMCombineIsEnabled);
#endif
*DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineIsEnabled) ? 18 : 0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
#endif
MyError = false;
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
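// The VM and row trip times are rounded up to quarter-line granularity, matching the
// 0.25-line units used for the dst_y_* prefetch values computed below.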
#ifdef __DML_VBA_ALLOW_DELTA__
if (!myPipe->DCCEnable) {
Tr0_trips = 0.0;
Tr0_trips_rounded = 0.0;
}
#endif
if (!GPUVMEnable) {
Tvm_trips = 0.0;
Tvm_trips_rounded = 0.0;
}
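// Tno_bw models the part of the prefetch window that consumes no return-data bandwidth:
// upper-level PDE fetch time when GPUVM has three or more page table levels, zero for a
// shallower GPUVM, otherwise a full line (no GPUVM, no DCC) or a quarter line (DCC only).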
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
} else {
*Tno_bw = 0;
}
} else if (!myPipe->DCCEnable) {
*Tno_bw = LineTime;
} else {
*Tno_bw = LineTime / 4;
}
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10 || myPipe->SourcePixelFormat == dm_420_12)
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
else
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
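// prefetch_bw_oto: per-pipe bandwidth for the bandwidth-based ("oto") schedule, the larger
// of the active-rate demand (bytes per pixel * pixel clock per DPP, scaled by min(1, VRatio))
// and the rate needed to move the whole swath payload within max_Tsw.  min_Lsw keeps the
// swath phase to at least PrefetchSourceLines / max_vratio_pre lines so the prefetch VRatio
// stays within max_vratio_pre; Lsw_oto is that line count rounded up to quarter lines.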
prefetch_bw_pr = bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane;
prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
#endif
#ifdef __DML_VBA_DEBUG__
dml_print("DML: HTotal: %d\n", myPipe->HTotal);
dml_print("DML: prefetch_bw_oto: %f\n", prefetch_bw_oto);
dml_print("DML: PrefetchSourceLinesY: %f\n", PrefetchSourceLinesY);
dml_print("DML: swath_width_luma_ub: %d\n", swath_width_luma_ub);
dml_print("DML: BytePerPixelY: %d\n", myPipe->BytePerPixelY);
dml_print("DML: Tsw_oto: %f\n", Tsw_oto);
#endif
if (GPUVMEnable == true)
Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto, Tvm_trips, LineTime / 4.0);
else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
Tr0_trips,
LineTime - Tvm_oto,
LineTime / 4);
} else {
Tr0_oto = (LineTime - Tvm_oto) / 2.0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
#endif
Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
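// Two candidate schedules are built: dst_y_prefetch_oto adds the VM, 2x row and swath line
// counts of the bandwidth-based schedule, while dst_y_prefetch_equ is the number of lines
// actually available between vstartup and active, minus setup time, the larger of
// TWait + TCalc and Tdmdl, and the post-scaler delay, floored to quarter lines with a
// 1/8-line allowance.  The smaller of the two becomes DestinationLinesForPrefetch below.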
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor, MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
if (prefetch_sw_bytes < dep_bytes)
prefetch_sw_bytes = 2 * dep_bytes;
dml_print("DML: dst_y_prefetch_oto: %f\n", dst_y_prefetch_oto);
dml_print("DML: Tvm_oto_lines: %f\n", Tvm_oto_lines);
dml_print("DML: Tr0_oto_lines: %f\n", Tr0_oto_lines);
dml_print("DML: Lsw_oto: %f\n", Lsw_oto);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: dst_y_prefetch_equ: %f (after round)\n", dst_y_prefetch_equ);
dml_print("DML: LineTime: %f\n", LineTime);
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: Tvstartup: %fus - time between vstartup and first pixel of active\n", VStartup * LineTime);
dml_print("DML: TSetup: %fus - time from vstartup to vready\n", *TSetup);
dml_print("DML: TCalc: %fus - time for calculations in dchub starting at vready\n", TCalc);
dml_print("DML: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", TWait);
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd\n", *Tdmdl_vm);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", *Tdmdl);
dml_print("DML: DSTXAfterScaler: %f pixels - number of pixel clocks pipeline and buffer delay after scaler\n", *DSTXAfterScaler);
dml_print("DML: DSTYAfterScaler: %f lines - number of lines of pipeline and buffer delay after scaler\n", *DSTYAfterScaler);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else {
PrefetchBandwidth1 = 0;
}
if (VStartup == MaxVStartup && Tsw_est1 / LineTime < min_Lsw && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else {
PrefetchBandwidth3 = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
#endif
if (VStartup == MaxVStartup && Tsw_est3 / LineTime < min_Lsw && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
/ (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth4 = prefetch_sw_bytes / (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
{
bool Case1OK;
bool Case2OK;
bool Case3OK;
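// PrefetchBandwidth1..4 correspond to different assumptions about whether the VM fetch and
// the row fetch are bandwidth-limited or trip(latency)-limited.  Each CaseNOK check verifies
// that the Tvm/Tr0 implied by that bandwidth are consistent with its assumption; the first
// consistent case is used, with PrefetchBandwidth4 (both trip-limited) as the fallback.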
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1 >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
}
} else {
Case1OK = false;
}
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2 >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
}
} else {
Case2OK = false;
}
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 < Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
}
} else {
Case3OK = false;
}
if (Case1OK) {
prefetch_bw_equ = PrefetchBandwidth1;
} else if (Case2OK) {
prefetch_bw_equ = PrefetchBandwidth2;
} else if (Case3OK) {
prefetch_bw_equ = PrefetchBandwidth3;
} else {
prefetch_bw_equ = PrefetchBandwidth4;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
(LineTime - Tvm_equ) / 2,
LineTime / 4);
} else {
Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
Tvm_equ = 0;
Tr0_equ = 0;
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
}
}
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
*DestinationLinesForPrefetch = dst_y_prefetch_oto;
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_ALLOW_DELTA__
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch
// See note above dated 5/30/2018
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n", __func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n", __func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
if (LinesToRequestPrefetchPixelData > 0 && prefetch_bw_equ > 0) {
*VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: SwathHeightY = %d\n", __func__, SwathHeightY);
dml_print("DML::%s: VInitPreFillY = %f\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY = dml_max(
(double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY / (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: MaxNumSwathY = %d\n", __func__, MaxNumSwathY);
#endif
}
*VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC);
dml_print("DML::%s: VInitPreFillC = %f\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4) || VInitPreFillC > 3) {
if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC = dml_max(
*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC / (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
*VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchC = %f\n", __func__, *VRatioPrefetchC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: MaxNumSwathC = %d\n", __func__, MaxNumSwathC);
#endif
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n", __func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelC * swath_width_chroma_ub
/ LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
dml_print("DML: LinesToRequestPrefetchPixelData: %f, should be > 0\n", LinesToRequestPrefetchPixelData);
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
dml_print(
"DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
(double) LinesToRequestPrefetchPixelData * LineTime + 2.0 * TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print(
"DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n",
(double) LinesToRequestPrefetchPixelData * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print(
"DML: Tslack(pre): %fus - time left over in schedule\n",
VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
{
double prefetch_vm_bw;
double prefetch_row_bw;
if (PDEAndMetaPTEBytesFrame == 0) {
prefetch_vm_bw = 0;
} else if (*DestinationLinesToRequestVMInVBlank > 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n", __func__, *DestinationLinesToRequestVMInVBlank);
dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
if (MetaRowByte + PixelPTEBytesPerRow == 0) {
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n", __func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
*PrefetchBandwidth = 0;
TimeForFetchingMetaPTE = 0;
TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
*RequiredPrefetchPixDataBWChroma = 0;
}
return MyError;
}
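// The DFS can only generate frequencies of the form VCOSpeed * 4 / N for an integer divider
// N, so requested clocks are quantized: "Up" returns the smallest achievable frequency at or
// above Clock, "Down" the largest achievable frequency at or below it.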
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
}
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
{
return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4.0 / Clock, 1);
}
static void CalculateDCCConfiguration(
bool DCCEnabled,
bool DCCProgrammingAssumesScanDirectionUnknown,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceWidthLuma,
unsigned int SurfaceWidthChroma,
unsigned int SurfaceHeightLuma,
unsigned int SurfaceHeightChroma,
double DETBufferSize,
unsigned int RequestHeight256ByteLuma,
unsigned int RequestHeight256ByteChroma,
enum dm_swizzle_mode TilingFormat,
unsigned int BytePerPixelY,
unsigned int BytePerPixelC,
double BytePerPixelDETY,
double BytePerPixelDETC,
enum scan_direction_class ScanOrientation,
unsigned int *MaxUncompressedBlockLuma,
unsigned int *MaxUncompressedBlockChroma,
unsigned int *MaxCompressedBlockLuma,
unsigned int *MaxCompressedBlockChroma,
unsigned int *IndependentBlockLuma,
unsigned int *IndependentBlockChroma)
{
int yuv420;
int horz_div_l;
int horz_div_c;
int vert_div_l;
int vert_div_c;
int swath_buf_size;
double detile_buf_vp_horz_limit;
double detile_buf_vp_vert_limit;
int MAS_vp_horz_limit;
int MAS_vp_vert_limit;
int max_vp_horz_width;
int max_vp_vert_height;
int eff_surf_width_l;
int eff_surf_width_c;
int eff_surf_height_l;
int eff_surf_height_c;
int full_swath_bytes_horz_wc_l;
int full_swath_bytes_horz_wc_c;
int full_swath_bytes_vert_wc_l;
int full_swath_bytes_vert_wc_c;
int req128_horz_wc_l;
int req128_horz_wc_c;
int req128_vert_wc_l;
int req128_vert_wc_c;
int segment_order_horz_contiguous_luma;
int segment_order_horz_contiguous_chroma;
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
RequestType RequestChroma;
yuv420 = ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12) ? 1 : 0);
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
vert_div_c = 1;
if (BytePerPixelY == 1)
vert_div_l = 0;
if (BytePerPixelC == 1)
vert_div_c = 0;
if (BytePerPixelY == 8 && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t || TilingFormat == dm_sw_64kb_s_x))
horz_div_l = 0;
if (BytePerPixelC == 8 && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t || TilingFormat == dm_sw_64kb_s_x))
horz_div_c = 0;
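// detile_buf_vp_horz/vert_limit: largest viewport width/height for which a full swath of
// luma (plus chroma, when present) still fits in half of the detile buffer after reserving
// two 256B entries per component; 4:2:0 10-bit gets a 1.5x allowance, and the result is
// decremented and rounded down to a multiple of 16.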
if (BytePerPixelC == 0) {
swath_buf_size = DETBufferSize / 2 - 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size / ((double) RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l));
detile_buf_vp_vert_limit = (double) swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
} else {
swath_buf_size = DETBufferSize / 2 - 2 * 2 * 256;
detile_buf_vp_horz_limit = (double) swath_buf_size
/ ((double) RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l)
+ (double) RequestHeight256ByteChroma * BytePerPixelC / (1 + horz_div_c) / (1 + yuv420));
detile_buf_vp_vert_limit = (double) swath_buf_size
/ (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l) + 256.0 / RequestHeight256ByteChroma / (1 + vert_div_c) / (1 + yuv420));
}
if (SourcePixelFormat == dm_420_10) {
detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
}
detile_buf_vp_horz_limit = dml_floor(detile_buf_vp_horz_limit - 1, 16);
detile_buf_vp_vert_limit = dml_floor(detile_buf_vp_vert_limit - 1, 16);
MAS_vp_horz_limit = SourcePixelFormat == dm_rgbe_alpha ? 3840 : 5760;
MAS_vp_vert_limit = (BytePerPixelC > 0 ? 2880 : 5760);
max_vp_horz_width = dml_min((double) MAS_vp_horz_limit, detile_buf_vp_horz_limit);
max_vp_vert_height = dml_min((double) MAS_vp_vert_limit, detile_buf_vp_vert_limit);
eff_surf_width_l = (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
eff_surf_height_l = (SurfaceHeightLuma > max_vp_vert_height ? max_vp_vert_height : SurfaceHeightLuma);
eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
if (BytePerPixelC > 0) {
full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma * BytePerPixelC;
full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
} else {
full_swath_bytes_horz_wc_c = 0;
full_swath_bytes_vert_wc_c = 0;
}
if (SourcePixelFormat == dm_420_10) {
full_swath_bytes_horz_wc_l = dml_ceil(full_swath_bytes_horz_wc_l * 2 / 3, 256);
full_swath_bytes_horz_wc_c = dml_ceil(full_swath_bytes_horz_wc_c * 2 / 3, 256);
full_swath_bytes_vert_wc_l = dml_ceil(full_swath_bytes_vert_wc_l * 2 / 3, 256);
full_swath_bytes_vert_wc_c = dml_ceil(full_swath_bytes_vert_wc_c * 2 / 3, 256);
}
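// Request-size selection: keep 256B requests when two full swaths of both components fit in
// the DET.  Otherwise switch one component to 128B requests (luma if its swath is at least
// 1.5x the chroma swath, chroma otherwise) and re-check; if it still does not fit, both
// components fall back to 128B requests.  Computed separately for horizontal and vertical scan.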
if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 0;
} else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c && 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 0;
req128_horz_wc_c = 1;
} else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c && full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
req128_horz_wc_l = 1;
req128_horz_wc_c = 0;
} else {
req128_horz_wc_l = 1;
req128_horz_wc_c = 1;
}
if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 0;
} else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c && 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 0;
req128_vert_wc_c = 1;
} else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c && full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
req128_vert_wc_l = 1;
req128_vert_wc_c = 0;
} else {
req128_vert_wc_l = 1;
req128_vert_wc_c = 1;
}
if (BytePerPixelY == 2 || (BytePerPixelY == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_luma = 0;
} else {
segment_order_horz_contiguous_luma = 1;
}
if ((BytePerPixelY == 8 && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x || TilingFormat == dm_sw_64kb_d_t || TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelY == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_luma = 0;
} else {
segment_order_vert_contiguous_luma = 1;
}
if (BytePerPixelC == 2 || (BytePerPixelC == 4 && TilingFormat != dm_sw_64kb_r_x)) {
segment_order_horz_contiguous_chroma = 0;
} else {
segment_order_horz_contiguous_chroma = 1;
}
if ((BytePerPixelC == 8 && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x || TilingFormat == dm_sw_64kb_d_t || TilingFormat == dm_sw_64kb_r_x))
|| (BytePerPixelC == 4 && TilingFormat == dm_sw_64kb_r_x)) {
segment_order_vert_contiguous_chroma = 0;
} else {
segment_order_vert_contiguous_chroma = 1;
}
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0) || (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0) || (req128_vert_wc_c == 1 && segment_order_vert_contiguous_chroma == 0)) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else if (ScanOrientation != dm_vert) {
if (req128_horz_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_horz_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_horz_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
} else {
if (req128_vert_wc_l == 0) {
RequestLuma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_luma == 0) {
RequestLuma = REQ_128BytesNonContiguous;
} else {
RequestLuma = REQ_128BytesContiguous;
}
if (req128_vert_wc_c == 0) {
RequestChroma = REQ_256Bytes;
} else if (segment_order_vert_contiguous_chroma == 0) {
RequestChroma = REQ_128BytesNonContiguous;
} else {
RequestChroma = REQ_128BytesContiguous;
}
}
if (RequestLuma == REQ_256Bytes) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 256;
*IndependentBlockLuma = 0;
} else if (RequestLuma == REQ_128BytesContiguous) {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 128;
*IndependentBlockLuma = 128;
} else {
*MaxUncompressedBlockLuma = 256;
*MaxCompressedBlockLuma = 64;
*IndependentBlockLuma = 64;
}
if (RequestChroma == REQ_256Bytes) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 256;
*IndependentBlockChroma = 0;
} else if (RequestChroma == REQ_128BytesContiguous) {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 128;
*IndependentBlockChroma = 128;
} else {
*MaxUncompressedBlockChroma = 256;
*MaxCompressedBlockChroma = 64;
*IndependentBlockChroma = 64;
}
if (DCCEnabled != true || BytePerPixelC == 0) {
*MaxUncompressedBlockChroma = 0;
*MaxCompressedBlockChroma = 0;
*IndependentBlockChroma = 0;
}
if (DCCEnabled != true) {
*MaxUncompressedBlockLuma = 0;
*MaxCompressedBlockLuma = 0;
*IndependentBlockLuma = 0;
}
}
static double CalculatePrefetchSourceLines(
struct display_mode_lib *mode_lib,
double VRatio,
double vtaps,
bool Interlace,
bool ProgressiveToInterlaceUnitInOPP,
unsigned int SwathHeight,
unsigned int ViewportYStart,
double *VInitPreFill,
unsigned int *MaxNumSwath)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MaxPartialSwath;
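// VInitPreFill: initial vertical fill, roughly (VRatio + vtaps + 1) / 2 source lines, with
// an extra Interlace * 0.5 * VRatio term when the progressive-to-interlace unit is not in
// the OPP.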
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
else
*VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
if (!v->IgnoreViewportPositioning) {
*MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2) % SwathHeight;
MaxPartialSwath = dml_max(1U, MaxPartialSwath);
} else {
if (ViewportYStart != 0)
dml_print("WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
*MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
if (*VInitPreFill > 1.0)
MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
else
MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1) % SwathHeight;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatio = %f\n", __func__, VRatio);
dml_print("DML::%s: vtaps = %f\n", __func__, vtaps);
dml_print("DML::%s: VInitPreFill = %f\n", __func__, *VInitPreFill);
dml_print("DML::%s: ProgressiveToInterlaceUnitInOPP = %d\n", __func__, ProgressiveToInterlaceUnitInOPP);
dml_print("DML::%s: IgnoreViewportPositioning = %d\n", __func__, v->IgnoreViewportPositioning);
dml_print("DML::%s: SwathHeight = %d\n", __func__, SwathHeight);
dml_print("DML::%s: MaxPartialSwath = %d\n", __func__, MaxPartialSwath);
dml_print("DML::%s: MaxNumSwath = %d\n", __func__, *MaxNumSwath);
dml_print("DML::%s: Prefetch source lines = %d\n", __func__, *MaxNumSwath * SwathHeight + MaxPartialSwath);
#endif
return *MaxNumSwath * SwathHeight + MaxPartialSwath;
}
static unsigned int CalculateVMAndRowBytes(
struct display_mode_lib *mode_lib,
bool DCCEnable,
unsigned int BlockHeight256Bytes,
unsigned int BlockWidth256Bytes,
enum source_format_class SourcePixelFormat,
unsigned int SurfaceTiling,
unsigned int BytePerPixel,
enum scan_direction_class ScanDirection,
unsigned int SwathWidth,
unsigned int ViewportHeight,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
unsigned int GPUVMMinPageSize,
unsigned int HostVMMinPageSize,
unsigned int PTEBufferSizeInRequests,
unsigned int Pitch,
unsigned int DCCMetaPitch,
unsigned int *MacroTileWidth,
unsigned int *MetaRowByte,
unsigned int *PixelPTEBytesPerRow,
bool *PTEBufferSizeNotExceeded,
int *dpte_row_width_ub,
unsigned int *dpte_row_height,
unsigned int *MetaRequestWidth,
unsigned int *MetaRequestHeight,
unsigned int *meta_row_width,
unsigned int *meta_row_height,
int *vm_group_bytes,
unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
int *DPDE0BytesFrame,
int *MetaPTEBytesFrame)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int MPDEBytesFrame;
unsigned int DCCMetaSurfaceBytes;
unsigned int MacroTileSizeBytes;
unsigned int MacroTileHeight;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
unsigned int PixelPTEReqHeightPTEs = 0;
unsigned int HostVMDynamicLevels = 0;
double FractionOfPTEReturnDrop;
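// HostVMDynamicLevels: number of non-cached host VM page table levels that still have to be
// fetched; larger minimum host page sizes skip the lowest one or two levels.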
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048) {
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
} else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
} else {
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
}
}
*MetaRequestHeight = 8 * BlockHeight256Bytes;
*MetaRequestWidth = 8 * BlockWidth256Bytes;
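// DCC metadata is one byte per 256-byte data block, hence the BytePerPixel / 256.0 factor in
// the meta row byte counts below.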
if (ScanDirection != dm_vert) {
*meta_row_height = *MetaRequestHeight;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth) + *MetaRequestWidth;
*MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
} else {
*meta_row_height = *MetaRequestWidth;
*meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight) + *MetaRequestHeight;
*MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
}
DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes) * BytePerPixel / 256;
if (GPUVMEnable == true) {
*MetaPTEBytesFrame = (dml_ceil((double) (DCCMetaSurfaceBytes - 4.0 * 1024.0) / (8 * 4.0 * 1024), 1) + 1) * 64;
MPDEBytesFrame = 128 * (v->GPUVMMaxPageTableLevels - 1);
} else {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
}
if (DCCEnable != true) {
*MetaPTEBytesFrame = 0;
MPDEBytesFrame = 0;
*MetaRowByte = 0;
}
if (SurfaceTiling == dm_sw_linear) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else {
MacroTileSizeBytes = 65536;
MacroTileHeight = 16 * BlockHeight256Bytes;
}
*MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
if (GPUVMEnable == true && v->GPUVMMaxPageTableLevels > 1) {
if (ScanDirection != dm_vert) {
*DPDE0BytesFrame = 64
* (dml_ceil(
((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes)
/ (8 * 2097152),
1) + 1);
} else {
*DPDE0BytesFrame = 64
* (dml_ceil(
((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes)
/ (8 * 2097152),
1) + 1);
}
ExtraDPDEBytesFrame = 128 * (v->GPUVMMaxPageTableLevels - 2);
} else {
*DPDE0BytesFrame = 0;
ExtraDPDEBytesFrame = 0;
}
PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame + ExtraDPDEBytesFrame;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaPTEBytesFrame = %d\n", __func__, *MetaPTEBytesFrame);
dml_print("DML::%s: MPDEBytesFrame = %d\n", __func__, MPDEBytesFrame);
dml_print("DML::%s: DPDE0BytesFrame = %d\n", __func__, *DPDE0BytesFrame);
dml_print("DML::%s: ExtraDPDEBytesFrame= %d\n", __func__, ExtraDPDEBytesFrame);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
#endif
if (HostVMEnable == true) {
PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * HostVMDynamicLevels);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
#endif
if (SurfaceTiling == dm_sw_linear) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = 1;
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
} else if (MacroTileSizeBytes == 4096) {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
if (ScanDirection != dm_vert)
FractionOfPTEReturnDrop = 0;
else
FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
*PixelPTEReqWidth = 16 * BlockWidth256Bytes;
*PTERequestSize = 128;
FractionOfPTEReturnDrop = 0;
} else {
PixelPTEReqHeightPTEs = 1;
*PixelPTEReqHeight = MacroTileHeight;
*PixelPTEReqWidth = 8 * *MacroTileWidth;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
}
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil((double)(Pitch * *dpte_row_height - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
*dpte_row_height = *PixelPTEReqHeight;
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else {
*dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
*dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
}
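// The PTE buffer budget is modeled as PTEBufferSizeInRequests requests of 64 bytes each; the
// row fits if the PTE bytes that survive the return drop stay within that budget.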
if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop) <= 64 * PTEBufferSizeInRequests) {
*PTEBufferSizeNotExceeded = true;
} else {
*PTEBufferSizeNotExceeded = false;
}
if (GPUVMEnable != true) {
*PixelPTEBytesPerRow = 0;
*PTEBufferSizeNotExceeded = true;
}
dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %i\n", *MetaPTEBytesFrame);
if (HostVMEnable == true) {
*PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * HostVMDynamicLevels);
}
if (HostVMEnable == true) {
*vm_group_bytes = 512;
*dpte_group_bytes = 512;
} else if (GPUVMEnable == true) {
*vm_group_bytes = 2048;
if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection == dm_vert) {
*dpte_group_bytes = 512;
} else {
*dpte_group_bytes = 2048;
}
} else {
*vm_group_bytes = 0;
*dpte_group_bytes = 0;
}
return PDEAndMetaPTEBytesFrame;
}
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
unsigned int j, k;
double HostVMInefficiencyFactor = 1.0;
bool NoChromaPlanes = true;
int ReorderBytes;
double VMDataOnlyReturnBW;
double MaxTotalRDBandwidth = 0;
int PrefetchMode = v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb];
v->WritebackDISPCLK = 0.0;
v->DISPCLKWithRamping = 0;
v->DISPCLKWithoutRamping = 0;
v->GlobalDPPCLK = 0.0;
/* DAL custom code: need to update ReturnBW in case min dcfclk is overridden */
{
double IdealFabricAndSDPPortBandwidthPerState = dml_min(
v->ReturnBusWidth * v->DCFCLKState[v->VoltageLevel][v->maxMpcComb],
v->FabricClockPerState[v->VoltageLevel] * v->FabricDatapathToDCNDataReturn);
double IdealDRAMBandwidthPerState = v->DRAMSpeedPerState[v->VoltageLevel] * v->NumberOfChannels * v->DRAMChannelWidth;
if (v->HostVMEnable != true) {
v->ReturnBW = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly / 100.0);
} else {
v->ReturnBW = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0);
}
}
/* End DAL custom code */
// DISPCLK and DPPCLK Calculation
//
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k]) {
v->WritebackDISPCLK = dml_max(
v->WritebackDISPCLK,
dml314_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->HRatio[k] > 1) {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1));
} else {
v->PSCL_THROUGHPUT_LUMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPLuma = v->PixelClock[k]
* dml_max(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
dml_max(v->HRatio[k] * v->VRatio[k] / v->PSCL_THROUGHPUT_LUMA[k], 1.0));
if ((v->htaps[k] > 6 || v->vtaps[k] > 6) && v->DPPCLKUsingSingleDPPLuma < 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPLuma = 2 * v->PixelClock[k];
}
if ((v->SourcePixelFormat[k] != dm_420_8 && v->SourcePixelFormat[k] != dm_420_10 && v->SourcePixelFormat[k] != dm_420_12
&& v->SourcePixelFormat[k] != dm_rgbe_alpha)) {
v->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
v->DPPCLKUsingSingleDPP[k] = v->DPPCLKUsingSingleDPPLuma;
} else {
if (v->HRatioChroma[k] > 1) {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->DPPCLKUsingSingleDPPChroma = v->PixelClock[k]
* dml_max3(
v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_THROUGHPUT_CHROMA[k],
1.0);
if ((v->HTAPsChroma[k] > 6 || v->VTAPsChroma[k] > 6) && v->DPPCLKUsingSingleDPPChroma < 2 * v->PixelClock[k]) {
v->DPPCLKUsingSingleDPPChroma = 2 * v->PixelClock[k];
}
v->DPPCLKUsingSingleDPP[k] = dml_max(v->DPPCLKUsingSingleDPPLuma, v->DPPCLKUsingSingleDPPChroma);
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] != k)
continue;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1) {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
* (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
} else {
v->DISPCLKWithRamping = dml_max(
v->DISPCLKWithRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) * (1 + v->DISPCLKRampingMargin / 100));
v->DISPCLKWithoutRamping = dml_max(
v->DISPCLKWithoutRamping,
v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
}
}
v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping, v->WritebackDISPCLK);
v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping, v->WritebackDISPCLK);
ASSERT(v->DISPCLKDPPCLKVCOSpeed != 0);
v->DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(v->DISPCLKWithRamping, v->DISPCLKDPPCLKVCOSpeed);
v->DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(v->DISPCLKWithoutRamping, v->DISPCLKDPPCLKVCOSpeed);
v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
v->soc.clock_limits[v->soc.num_states - 1].dispclk_mhz,
v->DISPCLKDPPCLKVCOSpeed);
if (v->DISPCLKWithoutRampingRoundedToDFSGranularity > v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated = v->DISPCLKWithoutRampingRoundedToDFSGranularity;
} else if (v->DISPCLKWithRampingRoundedToDFSGranularity > v->MaxDispclkRoundedToDFSGranularity) {
v->DISPCLK_calculated = v->MaxDispclkRoundedToDFSGranularity;
} else {
v->DISPCLK_calculated = v->DISPCLKWithRampingRoundedToDFSGranularity;
}
v->DISPCLK = v->DISPCLK_calculated;
DTRACE(" dispclk_mhz (calculated) = %f", v->DISPCLK_calculated);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->DPPCLKUsingSingleDPP[k] / v->DPPPerPlane[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
v->GlobalDPPCLK = dml_max(v->GlobalDPPCLK, v->DPPCLK_calculated[k]);
}
v->GlobalDPPCLK = RoundToDFSGranularityUp(v->GlobalDPPCLK, v->DISPCLKDPPCLKVCOSpeed);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK_calculated[k] = v->GlobalDPPCLK / 255 * dml_ceil(v->DPPCLK_calculated[k] * 255.0 / v->GlobalDPPCLK, 1);
DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, v->DPPCLK_calculated[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->DPPCLK[k] = v->DPPCLK_calculated[k];
}
// Urgent and NB P-State/DRAM Clock Change Watermark
DTRACE(" dcfclk_mhz = %f", v->DCFCLK);
DTRACE(" return_bus_bw = %f", v->ReturnBW);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelDETY[k],
&v->BytePerPixelDETC[k],
&v->BlockHeight256BytesY[k],
&v->BlockHeight256BytesC[k],
&v->BlockWidth256BytesY[k],
&v->BlockWidth256BytesC[k]);
}
CalculateSwathWidth(
false,
v->NumberOfActivePlanes,
v->SourcePixelFormat,
v->SourceScan,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->ODMCombineEnabled,
v->BytePerPixelY,
v->BytePerPixelC,
v->BlockHeight256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesY,
v->BlockWidth256BytesC,
v->BlendingAndTiming,
v->HActive,
v->HRatio,
v->DPPPerPlane,
v->SwathWidthSingleDPPY,
v->SwathWidthSingleDPPC,
v->SwathWidthY,
v->SwathWidthC,
v->dummyinteger3,
v->dummyinteger4,
v->swath_width_luma_ub,
v->swath_width_chroma_ub);
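/*
 * Per-plane average read bandwidth: bytes fetched per line of swath
 * (single-DPP swath width * bytes per pixel) divided by the line time and
 * scaled by the vertical ratio.
 */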
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->ReadBandwidthPlaneLuma[k] = v->SwathWidthSingleDPPY[k] * v->BytePerPixelY[k] / (v->HTotal[k] / v->PixelClock[k])
* v->VRatio[k];
v->ReadBandwidthPlaneChroma[k] = v->SwathWidthSingleDPPC[k] * v->BytePerPixelC[k] / (v->HTotal[k] / v->PixelClock[k])
* v->VRatioChroma[k];
DTRACE(" read_bw[%i] = %fBps", k, v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k]);
}
// DCFCLK Deep Sleep
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->ReturnBusWidth,
&v->DCFCLKDeepSleep);
// DSCCLK
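/*
 * DSCCLK runs at 1/3 of the back-end pixel rate (1/6 for 2:1 ODM combine,
 * 1/12 for 4:1), divided by the format factor (2 for 4:2:0 and native
 * 4:2:2, 1 otherwise) and increased to compensate for clock down-spreading.
 */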
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if ((v->BlendingAndTiming[k] != k) || !v->DSCEnabled[k]) {
v->DSCCLK_calculated[k] = 0.0;
} else {
if (v->OutputFormat[k] == dm_420)
v->DSCFormatFactor = 2;
else if (v->OutputFormat[k] == dm_444)
v->DSCFormatFactor = 1;
else if (v->OutputFormat[k] == dm_n422)
v->DSCFormatFactor = 2;
else
v->DSCFormatFactor = 1;
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 12 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 6 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
else
v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 3 / v->DSCFormatFactor
/ (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
}
}
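/*
 * DSC delay combines the rate-control (dscce) delay and the fixed DSC
 * pipeline delay per slice group, doubled or quadrupled under ODM combine
 * (with the slices split accordingly), then rescaled from the back-end to
 * the front-end pixel clock.
 */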
// DSC Delay
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double BPP = v->OutputBpp[k];
if (v->DSCEnabled[k] && BPP != 0) {
if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_disabled) {
v->DSCDelay[k] = dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
v->DSCDelay[k] = 2
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 2.0,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
} else {
v->DSCDelay[k] = 4
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
BPP,
dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
v->NumberOfDSCSlices[k] / 4.0,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
}
v->DSCDelay[k] = v->DSCDelay[k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelay[k] = 0;
}
}
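/* Planes that share a timing inherit the DSC delay of the plane that owns that timing. */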
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j) // NumberOfPlanes
if (j != k && v->BlendingAndTiming[k] == j && v->DSCEnabled[j])
v->DSCDelay[k] = v->DSCDelay[j];
// Prefetch
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PixelPTEBytesPerRowY;
unsigned int MetaRowByteY;
unsigned int MetaRowByteC;
unsigned int PDEAndMetaPTEBytesFrameC;
unsigned int PixelPTEBytesPerRowC;
bool PTEBufferSizeNotExceededY;
bool PTEBufferSizeNotExceededC;
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
|| v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
PDEAndMetaPTEBytesFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesC[k],
v->BlockWidth256BytesC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthC[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
v->DCCMetaPitchC[k],
&v->MacroTileWidthC[k],
&MetaRowByteC,
&PixelPTEBytesPerRowC,
&PTEBufferSizeNotExceededC,
&v->dpte_row_width_chroma_ub[k],
&v->dpte_row_height_chroma[k],
&v->meta_req_width_chroma[k],
&v->meta_req_height_chroma[k],
&v->meta_row_width_chroma[k],
&v->meta_row_height_chroma[k],
&v->dummyinteger1,
&v->dummyinteger2,
&v->PixelPTEReqWidthC[k],
&v->PixelPTEReqHeightC[k],
&v->PTERequestSizeC[k],
&v->dpde0_bytes_per_frame_ub_c[k],
&v->meta_pte_bytes_per_frame_ub_c[k]);
v->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightC[k],
v->ViewportYStartC[k],
&v->VInitPreFillC[k],
&v->MaxNumSwathC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
PixelPTEBytesPerRowC = 0;
PDEAndMetaPTEBytesFrameC = 0;
MetaRowByteC = 0;
v->MaxNumSwathC[k] = 0;
v->PrefetchSourceLinesC[k] = 0;
}
PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->BlockHeight256BytesY[k],
v->BlockWidth256BytesY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthY[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&MetaRowByteY,
&PixelPTEBytesPerRowY,
&PTEBufferSizeNotExceededY,
&v->dpte_row_width_luma_ub[k],
&v->dpte_row_height[k],
&v->meta_req_width[k],
&v->meta_req_height[k],
&v->meta_row_width[k],
&v->meta_row_height[k],
&v->vm_group_bytes[k],
&v->dpte_group_bytes[k],
&v->PixelPTEReqWidthY[k],
&v->PixelPTEReqHeightY[k],
&v->PTERequestSizeY[k],
&v->dpde0_bytes_per_frame_ub_l[k],
&v->meta_pte_bytes_per_frame_ub_l[k]);
v->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightY[k],
v->ViewportYStartY[k],
&v->VInitPreFillY[k],
&v->MaxNumSwathY[k]);
v->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
v->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
v->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
MetaRowByteY,
MetaRowByteC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
PixelPTEBytesPerRowY,
PixelPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bw[k],
&v->dpte_row_bw[k]);
}
v->TotalDCCActiveDPP = 0;
v->TotalActiveDPP = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalActiveDPP = v->TotalActiveDPP + v->DPPPerPlane[k];
if (v->DCCEnable[k])
v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + v->DPPPerPlane[k];
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
|| v->SourcePixelFormat[k] == dm_rgbe_alpha)
NoChromaPlanes = false;
}
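/*
 * ReorderBytes covers the worst-case out-of-order return data per channel
 * across the traffic types. VMDataOnlyReturnBW is the return bandwidth
 * derated for VM-only traffic; when both GPU VM and host VM are enabled,
 * the ratio ReturnBW / VMDataOnlyReturnBW is applied as a host-VM
 * inefficiency factor on PTE fetch bandwidth.
 */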
ReorderBytes = v->NumberOfChannels
* dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
VMDataOnlyReturnBW = dml_min(
dml_min(v->ReturnBusWidth * v->DCFCLK, v->FabricClock * v->FabricDatapathToDCNDataReturn)
* v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
v->DRAMSpeed * v->NumberOfChannels * v->DRAMChannelWidth
* v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly / 100.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: v->ReturnBusWidth = %f\n", __func__, v->ReturnBusWidth);
dml_print("DML::%s: v->DCFCLK = %f\n", __func__, v->DCFCLK);
dml_print("DML::%s: v->FabricClock = %f\n", __func__, v->FabricClock);
dml_print("DML::%s: v->FabricDatapathToDCNDataReturn = %f\n", __func__, v->FabricDatapathToDCNDataReturn);
dml_print("DML::%s: v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency = %f\n", __func__, v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency);
dml_print("DML::%s: v->DRAMSpeed = %f\n", __func__, v->DRAMSpeed);
dml_print("DML::%s: v->NumberOfChannels = %f\n", __func__, v->NumberOfChannels);
dml_print("DML::%s: v->DRAMChannelWidth = %f\n", __func__, v->DRAMChannelWidth);
dml_print("DML::%s: v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly = %f\n", __func__, v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly);
dml_print("DML::%s: VMDataOnlyReturnBW = %f\n", __func__, VMDataOnlyReturnBW);
dml_print("DML::%s: ReturnBW = %f\n", __func__, v->ReturnBW);
#endif
if (v->GPUVMEnable && v->HostVMEnable)
HostVMInefficiencyFactor = v->ReturnBW / VMDataOnlyReturnBW;
v->UrgentExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderBytes,
v->DCFCLK,
v->TotalActiveDPP,
v->PixelChunkSizeInKByte,
v->TotalDCCActiveDPP,
v->MetaChunkSize,
v->ReturnBW,
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->DPPPerPlane,
v->dpte_group_bytes,
HostVMInefficiencyFactor,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->TCalc = 24.0 / v->DCFCLKDeepSleep;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->DISPCLK;
} else
v->WritebackDelay[v->VoltageLevel][k] = 0;
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[j] == k && v->WritebackEnable[j] == true) {
v->WritebackDelay[v->VoltageLevel][k] = dml_max(
v->WritebackDelay[v->VoltageLevel][k],
v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[j],
v->WritebackHRatio[j],
v->WritebackVRatio[j],
v->WritebackVTaps[j],
v->WritebackDestinationWidth[j],
v->WritebackDestinationHeight[j],
v->WritebackSourceHeight[j],
v->HTotal[k]) / v->DISPCLK);
}
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k)
for (j = 0; j < v->NumberOfActivePlanes; ++j)
if (v->BlendingAndTiming[k] == j)
v->WritebackDelay[v->VoltageLevel][k] = v->WritebackDelay[v->VoltageLevel][j];
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->MaxVStartupLines[k] =
CalculateMaxVStartup(
v->VTotal[k],
v->VActive[k],
v->VBlankNom[k],
v->HTotal[k],
v->PixelClock[k],
v->ProgressiveToInterlaceUnitInOPP,
v->Interlace[k],
v->ip.VBlankNomDefaultUS,
v->WritebackDelay[v->VoltageLevel][k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d VoltageLevel = %d\n", __func__, k, v->VoltageLevel);
dml_print("DML::%s: k=%d WritebackDelay = %f\n", __func__, k, v->WritebackDelay[v->VoltageLevel][k]);
#endif
}
v->MaximumMaxVStartupLines = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k)
v->MaximumMaxVStartupLines = dml_max(v->MaximumMaxVStartupLines, v->MaxVStartupLines[k]);
// VBA_DELTA
// We don't really care to iterate between the various prefetch modes
//v->PrefetchERROR = CalculateMinAndMaxPrefetchMode(v->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &v->MinPrefetchMode, &v->MaxPrefetchMode);
v->UrgentLatency = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClock);
v->FractionOfUrgentBandwidth = 0.0;
v->FractionOfUrgentBandwidthImmediateFlip = 0.0;
v->VStartupLines = __DML_VBA_MIN_VSTARTUP__;
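/*
 * Search for a workable VStartup: starting from the minimum, grow the start
 * line until the prefetch schedule (and immediate flip, when required) fits
 * within the available return bandwidth, or until the maximum VStartup
 * across all planes is reached.
 */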
do {
double MaxTotalRDBandwidthNoUrgentBurst = 0.0;
bool DestinationLineTimesForPrefetchLessThan2 = false;
bool VRatioPrefetchMoreThan4 = false;
double TWait = CalculateTWait(PrefetchMode, v->DRAMClockChangeLatency, v->UrgentLatency, v->SREnterPlusExitTime);
MaxTotalRDBandwidth = 0;
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, v->VStartupLines);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
Pipe myPipe;
myPipe.DPPCLK = v->DPPCLK[k];
myPipe.DISPCLK = v->DISPCLK;
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep;
myPipe.DPPPerPlane = v->DPPPerPlane[k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.VRatio = v->VRatio[k];
myPipe.VRatioChroma = v->VRatioChroma[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
myPipe.BlockWidth256BytesC = v->BlockWidth256BytesC[k];
myPipe.BlockHeight256BytesC = v->BlockHeight256BytesC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineIsEnabled = v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1
|| v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1;
myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
myPipe.BytePerPixelY = v->BytePerPixelY[k];
myPipe.BytePerPixelC = v->BytePerPixelC[k];
myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
HostVMInefficiencyFactor,
&myPipe,
v->DSCDelay[k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->PrefetchSourceLinesY[k],
v->SwathWidthY[k],
v->VInitPreFillY[k],
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
&v->DestinationLinesToRequestRowInVBlank[k],
&v->VRatioPrefetchY[k],
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->TSetup[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Prefetch cal result=%0d\n", __func__, k, v->ErrorResult[k]);
#endif
v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[k]);
}
v->NoEnoughUrgentLatencyHiding = false;
v->NoEnoughUrgentLatencyHidingPre = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatioPrefetchY[k];
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgBurstFactorCursor[k],
&v->UrgBurstFactorLuma[k],
&v->UrgBurstFactorChroma[k],
&v->NoUrgentLatencyHiding[k]);
CalculateUrgentBurstFactor(
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
v->SwathHeightY[k],
v->SwathHeightC[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgentLatency,
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPrefetchY[k],
v->VRatioPrefetchC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->DETBufferSizeY[k],
v->DETBufferSizeC[k],
&v->UrgBurstFactorCursorPre[k],
&v->UrgBurstFactorLumaPre[k],
&v->UrgBurstFactorChromaPre[k],
&v->NoUrgentLatencyHidingPre[k]);
MaxTotalRDBandwidth = MaxTotalRDBandwidth
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthPlaneChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k]
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k]
* (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k]
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k])
+ v->cursor_bw_pre[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerPlane=%d\n", __func__, k, v->DPPPerPlane[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLuma=%f\n", __func__, k, v->UrgBurstFactorLuma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChroma=%f\n", __func__, k, v->UrgBurstFactorChroma[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorLumaPre=%f\n", __func__, k, v->UrgBurstFactorLumaPre[k]);
dml_print("DML::%s: k=%0d UrgBurstFactorChromaPre=%f\n", __func__, k, v->UrgBurstFactorChromaPre[k]);
dml_print("DML::%s: k=%0d VRatioPrefetchY=%f\n", __func__, k, v->VRatioPrefetchY[k]);
dml_print("DML::%s: k=%0d VRatioY=%f\n", __func__, k, v->VRatio[k]);
dml_print("DML::%s: k=%0d prefetch_vmrow_bw=%f\n", __func__, k, v->prefetch_vmrow_bw[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneLuma=%f\n", __func__, k, v->ReadBandwidthPlaneLuma[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneChroma=%f\n", __func__, k, v->ReadBandwidthPlaneChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw=%f\n", __func__, k, v->cursor_bw[k]);
dml_print("DML::%s: k=%0d meta_row_bw=%f\n", __func__, k, v->meta_row_bw[k]);
dml_print("DML::%s: k=%0d dpte_row_bw=%f\n", __func__, k, v->dpte_row_bw[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWLuma=%f\n", __func__, k, v->RequiredPrefetchPixDataBWLuma[k]);
dml_print("DML::%s: k=%0d RequiredPrefetchPixDataBWChroma=%f\n", __func__, k, v->RequiredPrefetchPixDataBWChroma[k]);
dml_print("DML::%s: k=%0d cursor_bw_pre=%f\n", __func__, k, v->cursor_bw_pre[k]);
dml_print("DML::%s: k=%0d MaxTotalRDBandwidthNoUrgentBurst=%f\n", __func__, k, MaxTotalRDBandwidthNoUrgentBurst);
#endif
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
if (v->VRatioPrefetchY[k] > 4 || v->VRatioPrefetchC[k] > 4)
VRatioPrefetchMoreThan4 = true;
if (v->NoUrgentLatencyHiding[k] == true)
v->NoEnoughUrgentLatencyHiding = true;
if (v->NoUrgentLatencyHidingPre[k] == true)
v->NoEnoughUrgentLatencyHidingPre = true;
}
v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / v->ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MaxTotalRDBandwidthNoUrgentBurst=%f\n", __func__, MaxTotalRDBandwidthNoUrgentBurst);
dml_print("DML::%s: ReturnBW=%f\n", __func__, v->ReturnBW);
dml_print("DML::%s: FractionOfUrgentBandwidth=%f\n", __func__, v->FractionOfUrgentBandwidth);
#endif
if (MaxTotalRDBandwidth <= v->ReturnBW && v->NoEnoughUrgentLatencyHiding == 0 && v->NoEnoughUrgentLatencyHidingPre == 0
&& !VRatioPrefetchMoreThan4 && !DestinationLineTimesForPrefetchLessThan2)
v->PrefetchModeSupported = true;
else {
v->PrefetchModeSupported = false;
dml_print("DML::%s: ***failed***. Bandwidth violation. Results are NOT valid\n", __func__);
dml_print("DML::%s: MaxTotalRDBandwidth:%f AvailReturnBandwidth:%f\n", __func__, MaxTotalRDBandwidth, v->ReturnBW);
dml_print("DML::%s: VRatioPrefetch %s more than 4\n", __func__, (VRatioPrefetchMoreThan4) ? "is" : "is not");
dml_print("DML::%s: DestinationLines for Prefetch %s less than 2\n", __func__, (DestinationLineTimesForPrefetchLessThan2) ? "is" : "is not");
}
// PREVIOUS_ERROR
// This error check is done after PrefetchModeSupported is set, so the flip
// schedule is still calculated even when the prefetch mode is not supported.
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ErrorResult[k] == true || v->NotEnoughTimeForDynamicMetadata[k] == true) {
v->PrefetchModeSupported = false;
dml_print("DML::%s: ***failed***. Prefetch schedule violation. Results are NOT valid\n", __func__);
}
}
if (v->PrefetchModeSupported == true && v->ImmediateFlipSupport == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBW;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
- dml_max(
v->ReadBandwidthPlaneLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthPlaneChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k],
v->DPPPerPlane[k]
* (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
+ v->DPPPerPlane[k] * (v->PDEAndMetaPTEBytesFrame[k] + v->MetaRowByte[k] + v->PixelPTEBytesPerRow[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
v->total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k]
+ v->ReadBandwidthLuma[k] * v->UrgBurstFactorLuma[k]
+ v->ReadBandwidthChroma[k] * v->UrgBurstFactorChroma[k]
+ v->cursor_bw[k] * v->UrgBurstFactorCursor[k],
v->DPPPerPlane[k]
* (v->final_flip_bw[k]
+ v->RequiredPrefetchPixDataBWLuma[k] * v->UrgBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgBurstFactorCursorPre[k]);
v->total_dcn_read_bw_with_flip_no_urgent_burst = v->total_dcn_read_bw_with_flip_no_urgent_burst
+ dml_max3(
v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
v->DPPPerPlane[k] * v->final_flip_bw[k] + v->ReadBandwidthPlaneLuma[k]
+ v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k],
v->DPPPerPlane[k]
* (v->final_flip_bw[k] + v->RequiredPrefetchPixDataBWLuma[k]
+ v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
}
v->FractionOfUrgentBandwidthImmediateFlip = v->total_dcn_read_bw_with_flip_no_urgent_burst / v->ReturnBW;
v->ImmediateFlipSupported = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBW) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: total_dcn_read_bw_with_flip %f (bw w/ flip too high!)\n", __func__, v->total_dcn_read_bw_with_flip);
#endif
v->ImmediateFlipSupported = false;
v->total_dcn_read_bw_with_flip = MaxTotalRDBandwidth;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Pipe %0d not supporting iflip\n", __func__, k);
#endif
v->ImmediateFlipSupported = false;
}
}
} else {
v->ImmediateFlipSupported = false;
}
v->PrefetchAndImmediateFlipSupported =
(v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && !v->HostVMEnable
&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) ||
v->ImmediateFlipSupported)) ? true : false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: PrefetchModeSupported %d\n", __func__, v->PrefetchModeSupported);
dml_print("DML::%s: ImmediateFlipRequirement %d\n", __func__, v->ImmediateFlipRequirement == dm_immediate_flip_required);
dml_print("DML::%s: ImmediateFlipSupported %d\n", __func__, v->ImmediateFlipSupported);
dml_print("DML::%s: ImmediateFlipSupport %d\n", __func__, v->ImmediateFlipSupport);
dml_print("DML::%s: HostVMEnable %d\n", __func__, v->HostVMEnable);
dml_print("DML::%s: PrefetchAndImmediateFlipSupported %d\n", __func__, v->PrefetchAndImmediateFlipSupported);
#endif
dml_print("DML::%s: Done loop: Vstartup=%d, Max Vstartup is %d\n", __func__, v->VStartupLines, v->MaximumMaxVStartupLines);
v->VStartupLines = v->VStartupLines + 1;
} while (!v->PrefetchAndImmediateFlipSupported && v->VStartupLines <= v->MaximumMaxVStartupLines);
ASSERT(v->PrefetchAndImmediateFlipSupported);
// Unbounded Request Enabled
CalculateUnboundedRequestAndCompressedBufferSize(
v->DETBufferSizeInKByte[0],
v->ConfigReturnBufferSizeInKByte,
v->UseUnboundedRequesting,
v->TotalActiveDPP,
NoChromaPlanes,
v->MaxNumDPP,
v->CompressedBufferSegmentSizeInkByte,
v->Output,
&v->UnboundedRequestEnabled,
&v->CompressedBufferSizeInkByte);
// Watermarks and NB P-State/DRAM Clock Change Support
{
enum clock_change_support DRAMClockChangeSupport; // dummy
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->DCFCLK,
v->ReturnBW,
v->UrgentLatency,
v->UrgentExtraLatency,
v->SOCCLK,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
&v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->WritebackAllowDRAMClockChangeEndPosition[k] = dml_max(
0,
v->VStartup[k] * v->HTotal[k] / v->PixelClock[k] - v->WritebackDRAMClockChangeWatermark);
} else {
v->WritebackAllowDRAMClockChangeEndPosition[k] = 0;
}
}
}
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
v->NumberOfActivePlanes,
v->VRatio,
v->VRatioChroma,
v->VRatioPrefetchY,
v->VRatioPrefetchC,
v->swath_width_luma_ub,
v->swath_width_chroma_ub,
v->DPPPerPlane,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_THROUGHPUT_LUMA,
v->PSCL_THROUGHPUT_CHROMA,
v->DPPCLK,
v->BytePerPixelC,
v->SourceScan,
v->NumberOfCursors,
v->CursorWidth,
v->CursorBPP,
v->BlockWidth256BytesY,
v->BlockHeight256BytesY,
v->BlockWidth256BytesC,
v->BlockHeight256BytesC,
v->DisplayPipeLineDeliveryTimeLuma,
v->DisplayPipeLineDeliveryTimeChroma,
v->DisplayPipeLineDeliveryTimeLumaPrefetch,
v->DisplayPipeLineDeliveryTimeChromaPrefetch,
v->DisplayPipeRequestDeliveryTimeLuma,
v->DisplayPipeRequestDeliveryTimeChroma,
v->DisplayPipeRequestDeliveryTimeLumaPrefetch,
v->DisplayPipeRequestDeliveryTimeChromaPrefetch,
v->CursorRequestDeliveryTime,
v->CursorRequestDeliveryTimePrefetch);
CalculateMetaAndPTETimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->MetaChunkSize,
v->MinMetaChunkSizeBytes,
v->HTotal,
v->VRatio,
v->VRatioChroma,
v->DestinationLinesToRequestRowInVBlank,
v->DestinationLinesToRequestRowInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->BytePerPixelY,
v->BytePerPixelC,
v->SourceScan,
v->dpte_row_height,
v->dpte_row_height_chroma,
v->meta_row_width,
v->meta_row_width_chroma,
v->meta_row_height,
v->meta_row_height_chroma,
v->meta_req_width,
v->meta_req_width_chroma,
v->meta_req_height,
v->meta_req_height_chroma,
v->dpte_group_bytes,
v->PTERequestSizeY,
v->PTERequestSizeC,
v->PixelPTEReqWidthY,
v->PixelPTEReqHeightY,
v->PixelPTEReqWidthC,
v->PixelPTEReqHeightC,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->DST_Y_PER_PTE_ROW_NOM_L,
v->DST_Y_PER_PTE_ROW_NOM_C,
v->DST_Y_PER_META_ROW_NOM_L,
v->DST_Y_PER_META_ROW_NOM_C,
v->TimePerMetaChunkNominal,
v->TimePerChromaMetaChunkNominal,
v->TimePerMetaChunkVBlank,
v->TimePerChromaMetaChunkVBlank,
v->TimePerMetaChunkFlip,
v->TimePerChromaMetaChunkFlip,
v->time_per_pte_group_nom_luma,
v->time_per_pte_group_vblank_luma,
v->time_per_pte_group_flip_luma,
v->time_per_pte_group_nom_chroma,
v->time_per_pte_group_vblank_chroma,
v->time_per_pte_group_flip_chroma);
CalculateVMGroupAndRequestTimes(
v->NumberOfActivePlanes,
v->GPUVMEnable,
v->GPUVMMaxPageTableLevels,
v->HTotal,
v->BytePerPixelC,
v->DestinationLinesToRequestVMInVBlank,
v->DestinationLinesToRequestVMInImmediateFlip,
v->DCCEnable,
v->PixelClock,
v->dpte_row_width_luma_ub,
v->dpte_row_width_chroma_ub,
v->vm_group_bytes,
v->dpde0_bytes_per_frame_ub_l,
v->dpde0_bytes_per_frame_ub_c,
v->meta_pte_bytes_per_frame_ub_l,
v->meta_pte_bytes_per_frame_ub_c,
v->TimePerVMGroupVBlank,
v->TimePerVMGroupFlip,
v->TimePerVMRequestVBlank,
v->TimePerVMRequestFlip);
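/*
 * Prefetch mode selects which power states may be entered during VBlank:
 * mode 0 allows both DRAM clock change and self refresh, mode 1 allows self
 * refresh only, mode 2 allows neither; MinTTUVBlank is set to the
 * corresponding worst-case watermark.
 */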
// Min TTUVBlank
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (PrefetchMode == 0) {
v->AllowDRAMClockChangeDuringVBlank[k] = true;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(
v->DRAMClockChangeWatermark,
dml_max(v->StutterEnterPlusExitWatermark, v->UrgentWatermark));
} else if (PrefetchMode == 1) {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
v->MinTTUVBlank[k] = dml_max(v->StutterEnterPlusExitWatermark, v->UrgentWatermark);
} else {
v->AllowDRAMClockChangeDuringVBlank[k] = false;
v->AllowDRAMSelfRefreshDuringVBlank[k] = false;
v->MinTTUVBlank[k] = v->UrgentWatermark;
}
if (!v->DynamicMetadataEnable[k])
v->MinTTUVBlank[k] = v->TCalc + v->MinTTUVBlank[k];
}
// DCC Configuration
v->ActiveDPPs = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateDCCConfiguration(v->DCCEnable[k], false, /* DCCProgrammingAssumesScanDirectionUnknown - we should always know the direction */
v->SourcePixelFormat[k],
v->SurfaceWidthY[k],
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
v->DETBufferSizeInKByte[0] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->BytePerPixelC[k],
v->BytePerPixelDETY[k],
v->BytePerPixelDETC[k],
v->SourceScan[k],
&v->DCCYMaxUncompressedBlock[k],
&v->DCCCMaxUncompressedBlock[k],
&v->DCCYMaxCompressedBlock[k],
&v->DCCCMaxCompressedBlock[k],
&v->DCCYIndependentBlock[k],
&v->DCCCIndependentBlock[k]);
}
// VStartup Adjustment
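/*
 * Fold the margin between the chosen VStartup and the per-plane maximum into
 * MinTTUVBlank and the dynamic-metadata deadlines, then derive
 * MIN_DST_Y_NEXT_START and whether VREADY lands at or after VSYNC.
 */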
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
bool isInterlaceTiming;
double Tvstartup_margin = (v->MaxVStartupLines[k] - v->VStartup[k]) * v->HTotal[k] / v->PixelClock[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, MinTTUVBlank = %f (before margin)\n", __func__, k, v->MinTTUVBlank[k]);
#endif
v->MinTTUVBlank[k] = v->MinTTUVBlank[k] + Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, Tvstartup_margin = %f\n", __func__, k, Tvstartup_margin);
dml_print("DML::%s: k=%d, MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, MinTTUVBlank = %f\n", __func__, k, v->MinTTUVBlank[k]);
#endif
v->Tdmdl[k] = v->Tdmdl[k] + Tvstartup_margin;
if (v->DynamicMetadataEnable[k] && v->DynamicMetadataVMEnabled) {
v->Tdmdl_vm[k] = v->Tdmdl_vm[k] + Tvstartup_margin;
}
isInterlaceTiming = (v->Interlace[k] && !v->ProgressiveToInterlaceUnitInOPP);
v->VStartup[k] = (isInterlaceTiming ? (2 * v->MaxVStartupLines[k]) : v->MaxVStartupLines[k]);
if (v->Interlace[k] && !v->ProgressiveToInterlaceUnitInOPP) {
v->MIN_DST_Y_NEXT_START[k] = dml_floor((v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k]) / 2.0, 1.0);
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
(int) (v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]))) {
v->VREADY_AT_OR_AFTER_VSYNC[k] = true;
} else {
v->VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, VStartup = %d (max)\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, VUpdateOffsetPix = %d\n", __func__, k, v->VUpdateOffsetPix[k]);
dml_print("DML::%s: k=%d, VUpdateWidthPix = %d\n", __func__, k, v->VUpdateWidthPix[k]);
dml_print("DML::%s: k=%d, VReadyOffsetPix = %d\n", __func__, k, v->VReadyOffsetPix[k]);
dml_print("DML::%s: k=%d, HTotal = %d\n", __func__, k, v->HTotal[k]);
dml_print("DML::%s: k=%d, VTotal = %d\n", __func__, k, v->VTotal[k]);
dml_print("DML::%s: k=%d, VActive = %d\n", __func__, k, v->VActive[k]);
dml_print("DML::%s: k=%d, VFrontPorch = %d\n", __func__, k, v->VFrontPorch[k]);
dml_print("DML::%s: k=%d, VStartup = %d\n", __func__, k, v->VStartup[k]);
dml_print("DML::%s: k=%d, MIN_DST_Y_NEXT_START = %f\n", __func__, k, v->MIN_DST_Y_NEXT_START[k]);
dml_print("DML::%s: k=%d, VREADY_AT_OR_AFTER_VSYNC = %d\n", __func__, k, v->VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
{
//Maximum Bandwidth Used
double TotalWRBandwidth = 0;
double MaxPerPlaneVActiveWRBandwidth = 0;
double WRBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
WRBandwidth = 0;
if (v->WritebackEnable[k] == true && v->WritebackPixelFormat[k] == dm_444_32) {
WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 4;
} else if (v->WritebackEnable[k] == true) {
WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 8;
}
TotalWRBandwidth = TotalWRBandwidth + WRBandwidth;
MaxPerPlaneVActiveWRBandwidth = dml_max(MaxPerPlaneVActiveWRBandwidth, WRBandwidth);
}
v->TotalDataReadBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalDataReadBandwidth = v->TotalDataReadBandwidth + v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k];
}
}
// Stutter Efficiency
CalculateStutterEfficiency(
mode_lib,
v->CompressedBufferSizeInkByte,
v->UnboundedRequestEnabled,
v->ConfigReturnBufferSizeInKByte,
v->MetaFIFOSizeInKEntries,
v->ZeroSizeBufferEntries,
v->NumberOfActivePlanes,
v->ROBBufferSizeInKByte,
v->TotalDataReadBandwidth,
v->DCFCLK,
v->ReturnBW,
v->COMPBUF_RESERVED_SPACE_64B,
v->COMPBUF_RESERVED_SPACE_ZS,
v->SRExitTime,
v->SRExitZ8Time,
v->SynchronizedVBlank,
v->StutterEnterPlusExitWatermark,
v->Z8StutterEnterPlusExitWatermark,
v->ProgressiveToInterlaceUnitInOPP,
v->Interlace,
v->MinTTUVBlank,
v->DPPPerPlane,
v->DETBufferSizeY,
v->BytePerPixelY,
v->BytePerPixelDETY,
v->SwathWidthY,
v->SwathHeightY,
v->SwathHeightC,
v->DCCRateLuma,
v->DCCRateChroma,
v->DCCFractionOfZeroSizeRequestsLuma,
v->DCCFractionOfZeroSizeRequestsChroma,
v->HTotal,
v->VTotal,
v->PixelClock,
v->VRatio,
v->SourceScan,
v->BlockHeight256BytesY,
v->BlockWidth256BytesY,
v->BlockHeight256BytesC,
v->BlockWidth256BytesC,
v->DCCYMaxUncompressedBlock,
v->DCCCMaxUncompressedBlock,
v->VActive,
v->DCCEnable,
v->WritebackEnable,
v->ReadBandwidthPlaneLuma,
v->ReadBandwidthPlaneChroma,
v->meta_row_bw,
v->dpte_row_bw,
&v->StutterEfficiencyNotIncludingVBlank,
&v->StutterEfficiency,
&v->NumberOfStutterBurstsPerFrame,
&v->Z8StutterEfficiencyNotIncludingVBlank,
&v->Z8StutterEfficiency,
&v->Z8NumberOfStutterBurstsPerFrame,
&v->StutterPeriod);
}
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
// Display Pipe Configuration
double BytePerPixDETY[DC__NUM_DPP__MAX];
double BytePerPixDETC[DC__NUM_DPP__MAX];
int BytePerPixY[DC__NUM_DPP__MAX];
int BytePerPixC[DC__NUM_DPP__MAX];
int Read256BytesBlockHeightY[DC__NUM_DPP__MAX];
int Read256BytesBlockHeightC[DC__NUM_DPP__MAX];
int Read256BytesBlockWidthY[DC__NUM_DPP__MAX];
int Read256BytesBlockWidthC[DC__NUM_DPP__MAX];
double dummy1[DC__NUM_DPP__MAX];
double dummy2[DC__NUM_DPP__MAX];
double dummy3[DC__NUM_DPP__MAX];
double dummy4[DC__NUM_DPP__MAX];
int dummy5[DC__NUM_DPP__MAX];
int dummy6[DC__NUM_DPP__MAX];
bool dummy7[DC__NUM_DPP__MAX];
bool dummysinglestring;
unsigned int k;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&BytePerPixY[k],
&BytePerPixC[k],
&BytePerPixDETY[k],
&BytePerPixDETC[k],
&Read256BytesBlockHeightY[k],
&Read256BytesBlockHeightC[k],
&Read256BytesBlockWidthY[k],
&Read256BytesBlockWidthC[k]);
}
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
dummy1,
dummy2,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
v->ODMCombineEnabled,
v->BlendingAndTiming,
BytePerPixY,
BytePerPixC,
BytePerPixDETY,
BytePerPixDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->DPPPerPlane,
dummy5,
dummy6,
dummy3,
dummy4,
v->SwathHeightY,
v->SwathHeightC,
v->DETBufferSizeY,
v->DETBufferSizeC,
dummy7,
&dummysinglestring);
}
static bool CalculateBytePerPixelAnd256BBlockSizes(
enum source_format_class SourcePixelFormat,
enum dm_swizzle_mode SurfaceTiling,
unsigned int *BytePerPixelY,
unsigned int *BytePerPixelC,
double *BytePerPixelDETY,
double *BytePerPixelDETC,
unsigned int *BlockHeight256BytesY,
unsigned int *BlockHeight256BytesC,
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC)
{
if (SourcePixelFormat == dm_444_64) {
*BytePerPixelDETY = 8;
*BytePerPixelDETC = 0;
*BytePerPixelY = 8;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_444_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 0;
*BytePerPixelY = 1;
*BytePerPixelC = 0;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BytePerPixelDETY = 4;
*BytePerPixelDETC = 1;
*BytePerPixelY = 4;
*BytePerPixelC = 1;
} else if (SourcePixelFormat == dm_420_8) {
*BytePerPixelDETY = 1;
*BytePerPixelDETC = 2;
*BytePerPixelY = 1;
*BytePerPixelC = 2;
} else if (SourcePixelFormat == dm_420_12) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 4;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
*BytePerPixelDETY = 4.0 / 3;
*BytePerPixelDETC = 8.0 / 3;
*BytePerPixelY = 2;
*BytePerPixelC = 4;
}
if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8 || SourcePixelFormat == dm_mono_16
|| SourcePixelFormat == dm_mono_8 || SourcePixelFormat == dm_rgbe)) {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
} else if (SourcePixelFormat == dm_444_64) {
*BlockHeight256BytesY = 4;
} else if (SourcePixelFormat == dm_444_8) {
*BlockHeight256BytesY = 16;
} else {
*BlockHeight256BytesY = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockHeight256BytesC = 0;
*BlockWidth256BytesC = 0;
} else {
if (SurfaceTiling == dm_sw_linear) {
*BlockHeight256BytesY = 1;
*BlockHeight256BytesC = 1;
} else if (SourcePixelFormat == dm_rgbe_alpha) {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 16;
} else if (SourcePixelFormat == dm_420_8) {
*BlockHeight256BytesY = 16;
*BlockHeight256BytesC = 8;
} else {
*BlockHeight256BytesY = 8;
*BlockHeight256BytesC = 8;
}
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
return true;
}
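/*
 * TWait is the time the prefetch must budget before data can be consumed:
 * the worst of DRAM-clock-change-plus-urgent latency and self-refresh
 * enter/exit time in prefetch mode 0, the worst of self-refresh enter/exit
 * time and urgent latency in mode 1, and urgent latency alone otherwise.
 */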
static double CalculateTWait(unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatency, double SREnterPlusExitTime)
{
if (PrefetchMode == 0) {
return dml_max(DRAMClockChangeLatency + UrgentLatency, dml_max(SREnterPlusExitTime, UrgentLatency));
} else if (PrefetchMode == 1) {
return dml_max(SREnterPlusExitTime, UrgentLatency);
} else {
return UrgentLatency;
}
}
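/*
 * Writeback DISPCLK is the largest of three limits: the horizontal-taps
 * throughput term, the vertical-taps-per-output-line term, and the
 * line-buffer-size bound.
 */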
double dml314_CalculateWriteBackDISPCLK(
enum source_format_class WritebackPixelFormat,
double PixelClock,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackHTaps,
unsigned int WritebackVTaps,
long WritebackSourceWidth,
long WritebackDestinationWidth,
unsigned int HTotal,
unsigned int WritebackLineBufferSize)
{
double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
DISPCLK_V = PixelClock * (WritebackVTaps * dml_ceil(WritebackDestinationWidth / 6.0, 1) + 8.0) / HTotal;
DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / WritebackSourceWidth;
return dml_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
}
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
double WritebackVRatio,
unsigned int WritebackVTaps,
int WritebackDestinationWidth,
int WritebackDestinationHeight,
int WritebackSourceHeight,
unsigned int HTotal)
{
double CalculateWriteBackDelay;
double Line_length;
double Output_lines_last_notclamped;
double WritebackVInit;
WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
Line_length = dml_max((double) WritebackDestinationWidth, dml_ceil(WritebackDestinationWidth / 6.0, 1) * WritebackVTaps);
Output_lines_last_notclamped = WritebackDestinationHeight - 1 - dml_ceil((WritebackSourceHeight - WritebackVInit) / WritebackVRatio, 1);
if (Output_lines_last_notclamped < 0) {
CalculateWriteBackDelay = 0;
} else {
CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
}
return CalculateWriteBackDelay;
}
static void CalculateVupdateAndDynamicMetadataParameters(
int MaxInterDCNTileRepeaters,
double DPPCLK,
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
int HTotal,
int VBlank,
int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *TSetup,
double *Tdmbf,
double *Tdmec,
double *Tdmsks,
int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
double TotalRepeaterDelayTime;
TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / DPPCLK + 3 / DISPCLK);
*VUpdateWidthPix = dml_ceil((14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime) * PixelClock, 1.0);
*VReadyOffsetPix = dml_ceil(dml_max(150.0 / DPPCLK, TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK) * PixelClock, 1.0);
*VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
*TSetup = (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
*Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
*Tdmec = HTotal / PixelClock;
if (DynamicMetadataLinesBeforeActiveRequired == 0) {
*Tdmsks = VBlank * HTotal / PixelClock / 2.0;
} else {
*Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
}
if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VUpdateWidthPix = %d\n", __func__, *VUpdateWidthPix);
dml_print("DML::%s: VReadyOffsetPix = %d\n", __func__, *VReadyOffsetPix);
dml_print("DML::%s: VUpdateOffsetPix = %d\n", __func__, *VUpdateOffsetPix);
#endif
}
static void CalculateRowBandwidth(
bool GPUVMEnable,
enum source_format_class SourcePixelFormat,
double VRatio,
double VRatioChroma,
bool DCCEnable,
double LineTime,
unsigned int MetaRowByteLuma,
unsigned int MetaRowByteChroma,
unsigned int meta_row_height_luma,
unsigned int meta_row_height_chroma,
unsigned int PixelPTEBytesPerRowLuma,
unsigned int PixelPTEBytesPerRowChroma,
unsigned int dpte_row_height_luma,
unsigned int dpte_row_height_chroma,
double *meta_row_bw,
double *dpte_row_bw)
{
if (DCCEnable != true) {
*meta_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime) + VRatioChroma * MetaRowByteChroma / (meta_row_height_chroma * LineTime);
} else {
*meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
}
if (GPUVMEnable != true) {
*dpte_row_bw = 0;
} else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ VRatioChroma * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime);
} else {
*dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
}
}
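/*
 * Immediate flip schedule: the flip bandwidth is the plane's proportional
 * share of the bandwidth left over for flips, the PTE/meta and row fetch
 * times are bounded below by a quarter line, and the flip is marked
 * unsupported for the pipe when the VM or row fetch would take too many
 * lines or would not complete within the minimum row time.
 */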
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow)
{
struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
LineTime / 4);
} else {
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
if (v->GPUVMEnable == true) {
v->final_flip_bw[k] = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
} else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
v->final_flip_bw[k] = 0;
}
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
v->dpte_row_height[k] * LineTime / v->VRatio[k],
v->meta_row_height[k] * LineTime / v->VRatio[k],
v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
v->ImmediateFlipSupportedForPipe[k] = false;
} else {
v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
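/*
 * Clamp the requested bits-per-pixel to what the link can carry: compute the
 * maximum BPP the link supports (with DSC overhead and ODM limits), then
 * either pick the best legal value automatically (DesiredBPP == 0) or
 * validate the caller's request, returning BPP_INVALID on failure.
 */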
static double TruncToValidBPP(
double LinkBitRate,
int Lanes,
int HTotal,
int HActive,
double PixelClock,
double DesiredBPP,
bool DSCEnable,
enum output_encoder_class Output,
enum output_format_class Format,
unsigned int DSCInputBitPerComponent,
int DSCSlices,
int AudioRate,
int AudioLayout,
enum odm_combine_mode ODMCombine)
{
double MaxLinkBPP;
int MinDSCBPP;
double MaxDSCBPP;
int NonDSCBPP0;
int NonDSCBPP1;
int NonDSCBPP2;
if (Format == dm_420) {
NonDSCBPP0 = 12;
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
NonDSCBPP0 = 16;
NonDSCBPP1 = 20;
NonDSCBPP2 = 24;
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
} else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
}
if (DSCEnable && Output == dm_dp) {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock * (1 - 2.4 / 100);
} else {
MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock;
}
if (ODMCombine == dm_odm_combine_mode_4to1 && MaxLinkBPP > 16) {
MaxLinkBPP = 16;
} else if (ODMCombine == dm_odm_combine_mode_2to1 && MaxLinkBPP > 32) {
MaxLinkBPP = 32;
}
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
return BPP_INVALID;
} else if (MaxLinkBPP >= MaxDSCBPP) {
return MaxDSCBPP;
} else {
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
}
} else {
if (MaxLinkBPP >= NonDSCBPP2) {
return NonDSCBPP2;
} else if (MaxLinkBPP >= NonDSCBPP1) {
return NonDSCBPP1;
} else if (MaxLinkBPP >= NonDSCBPP0) {
return 16.0;
} else {
return BPP_INVALID;
}
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP <= NonDSCBPP0))
|| (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
return BPP_INVALID;
} else {
return DesiredBPP;
}
}
return BPP_INVALID;
}
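/*
 * CalculatePrefetchSchedulePerPlane: gather the pipe parameters of plane k
 * for voltage state i and DPP configuration j into a local Pipe struct and
 * run CalculatePrefetchSchedule on it, recording whether the plane can be
 * prefetched in time in NoTimeForPrefetch[i][j][k].
 */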
static noinline void CalculatePrefetchSchedulePerPlane(
struct display_mode_lib *mode_lib,
double HostVMInefficiencyFactor,
int i,
unsigned int j,
unsigned int k)
{
struct vba_vars_st *v = &mode_lib->vba;
Pipe myPipe;
myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
myPipe.PixelClock = v->PixelClock[k];
myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = v->ScalerEnabled[k];
myPipe.VRatio = mode_lib->vba.VRatio[k];
myPipe.VRatioChroma = mode_lib->vba.VRatioChroma[k];
myPipe.SourceScan = v->SourceScan[k];
myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
myPipe.InterlaceEnable = v->Interlace[k];
myPipe.NumberOfCursors = v->NumberOfCursors[k];
myPipe.VBlank = v->VTotal[k] - v->VActive[k];
myPipe.HTotal = v->HTotal[k];
myPipe.DCCEnable = v->DCCEnable[k];
myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
|| v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
myPipe.BytePerPixelY = v->BytePerPixelY[k];
myPipe.BytePerPixelC = v->BytePerPixelC[k];
myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
HostVMInefficiencyFactor,
&myPipe,
v->DSCDelayPerState[i][k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->PrefetchLinesY[i][j][k],
v->SwathWidthYThisState[k],
v->PrefillY[k],
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
&v->LinesForMetaAndDPTERow[k],
&v->VRatioPreY[i][j][k],
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->dummy7[k],
&v->dummy8[k],
&v->dummy13[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
}
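/*
 * dml314_ModeSupportAndSystemConfigurationFull: full mode-support sweep.
 * For every voltage state i and both DPP configurations j, evaluate each
 * support criterion (scaling, bandwidth, DSC, prefetch, pipe count, pitch,
 * and so on) and record the results in the per-state support arrays; the
 * j == 1 pass may additionally split planes across extra DPPs via MPC
 * combine when pipes are available.
 */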
void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
int i, j;
unsigned int k, m;
int ReorderingBytes;
int MinPrefetchMode = 0, MaxPrefetchMode = 2;
bool NoChroma = true;
bool EnoughWritebackUnits = true;
bool P2IWith420 = false;
bool DSCOnlyIfNecessaryWithBPP = false;
bool DSC422NativeNotSupported = false;
double MaxTotalVActiveRDBandwidth;
bool ViewportExceedsSurface = false;
bool FMTBufferExceeded = false;
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
/*Scale Ratio, taps Support Check*/
v->ScaleRatioAndTapsSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ScalerEnabled[k] == false
&& ((v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8 && v->SourcePixelFormat[k] != dm_rgbe
&& v->SourcePixelFormat[k] != dm_rgbe_alpha) || v->HRatio[k] != 1.0 || v->htaps[k] != 1.0
|| v->VRatio[k] != 1.0 || v->vtaps[k] != 1.0)) {
v->ScaleRatioAndTapsSupport = false;
} else if (v->vtaps[k] < 1.0 || v->vtaps[k] > 8.0 || v->htaps[k] < 1.0 || v->htaps[k] > 8.0
|| (v->htaps[k] > 1.0 && (v->htaps[k] % 2) == 1) || v->HRatio[k] > v->MaxHSCLRatio
|| v->VRatio[k] > v->MaxVSCLRatio || v->HRatio[k] > v->htaps[k]
|| v->VRatio[k] > v->vtaps[k]
|| (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
&& v->SourcePixelFormat[k] != dm_mono_8 && v->SourcePixelFormat[k] != dm_rgbe
&& (v->VTAPsChroma[k] < 1 || v->VTAPsChroma[k] > 8 || v->HTAPsChroma[k] < 1
|| v->HTAPsChroma[k] > 8 || (v->HTAPsChroma[k] > 1 && v->HTAPsChroma[k] % 2 == 1)
|| v->HRatioChroma[k] > v->MaxHSCLRatio
|| v->VRatioChroma[k] > v->MaxVSCLRatio
|| v->HRatioChroma[k] > v->HTAPsChroma[k]
|| v->VRatioChroma[k] > v->VTAPsChroma[k]))) {
v->ScaleRatioAndTapsSupport = false;
}
}
/*Source Format, Pixel Format and Scan Support Check*/
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
/*Bandwidth Support Check*/
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateBytePerPixelAnd256BBlockSizes(
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
&v->BytePerPixelY[k],
&v->BytePerPixelC[k],
&v->BytePerPixelInDETY[k],
&v->BytePerPixelInDETC[k],
&v->Read256BlockHeightY[k],
&v->Read256BlockHeightC[k],
&v->Read256BlockWidthY[k],
&v->Read256BlockWidthC[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->SourceScan[k] != dm_vert) {
v->SwathWidthYSingleDPP[k] = v->ViewportWidth[k];
v->SwathWidthCSingleDPP[k] = v->ViewportWidthChroma[k];
} else {
v->SwathWidthYSingleDPP[k] = v->ViewportHeight[k];
v->SwathWidthCSingleDPP[k] = v->ViewportHeightChroma[k];
}
}
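/*
 * Single-DPP read bandwidth per plane: swath width times bytes per pixel in
 * the DET over the line time, scaled by the vertical ratio. The chroma term
 * halves both the width and the vertical rate (2x2 subsampled chroma);
 * planes without a chroma surface presumably contribute 0 here because
 * BytePerPixelInDETC is 0 for them.
 */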
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->ReadBandwidthLuma[k] = v->SwathWidthYSingleDPP[k] * dml_ceil(v->BytePerPixelInDETY[k], 1.0)
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
v->ReadBandwidthChroma[k] = v->SwathWidthYSingleDPP[k] / 2 * dml_ceil(v->BytePerPixelInDETC[k], 2.0)
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k] / 2.0;
}
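/*
 * Writeback write bandwidth: destination pixel count over the time spanned
 * by the writeback source lines, at 8 bytes per pixel for dm_444_64 output
 * and 4 bytes per pixel otherwise.
 */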
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true && v->WritebackPixelFormat[k] == dm_444_64) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 8.0;
} else if (v->WritebackEnable[k] == true) {
v->WriteBandwidth[k] = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
/ (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4.0;
} else {
v->WriteBandwidth[k] = 0.0;
}
}
/*Writeback Latency support check*/
v->WritebackLatencySupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true && (v->WriteBandwidth[k] > v->WritebackInterfaceBufferSize * 1024 / v->WritebackLatency)) {
v->WritebackLatencySupport = false;
}
}
/*Writeback Mode Support Check*/
v->TotalNumberOfActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
v->TotalNumberOfActiveWriteback = v->TotalNumberOfActiveWriteback + 1;
}
}
if (v->TotalNumberOfActiveWriteback > v->MaxNumWriteback) {
EnoughWritebackUnits = false;
}
/*Writeback Scale Ratio and Taps Support Check*/
v->WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
if (v->WritebackHRatio[k] > v->WritebackMaxHSCLRatio || v->WritebackVRatio[k] > v->WritebackMaxVSCLRatio
|| v->WritebackHRatio[k] < v->WritebackMinHSCLRatio
|| v->WritebackVRatio[k] < v->WritebackMinVSCLRatio
|| v->WritebackHTaps[k] > v->WritebackMaxHSCLTaps
|| v->WritebackVTaps[k] > v->WritebackMaxVSCLTaps
|| v->WritebackHRatio[k] > v->WritebackHTaps[k] || v->WritebackVRatio[k] > v->WritebackVTaps[k]
|| (v->WritebackHTaps[k] > 2.0 && ((v->WritebackHTaps[k] % 2) == 1))) {
v->WritebackScaleRatioAndTapsSupport = false;
}
if (2.0 * v->WritebackDestinationWidth[k] * (v->WritebackVTaps[k] - 1) * 57 > v->WritebackLineBufferSize) {
v->WritebackScaleRatioAndTapsSupport = false;
}
}
}
/*Maximum DISPCLK/DPPCLK Support check*/
v->WritebackRequiredDISPCLK = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->WritebackEnable[k] == true) {
v->WritebackRequiredDISPCLK = dml_max(
v->WritebackRequiredDISPCLK,
dml314_CalculateWriteBackDISPCLK(
v->WritebackPixelFormat[k],
v->PixelClock[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackHTaps[k],
v->WritebackVTaps[k],
v->WritebackSourceWidth[k],
v->WritebackDestinationWidth[k],
v->HTotal[k],
v->WritebackLineBufferSize));
}
}
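/*
 * Minimum DPPCLK per plane when a single DPP is used: bounded by the
 * DCHUB-to-PSCL and PSCL-to-LB throughput factors and by the taps-per-line
 * cost; with more than 6 taps in either direction the DPP needs at least
 * twice the pixel clock.
 */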
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->HRatio[k] > 1.0) {
v->PSCL_FACTOR[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
if (v->BytePerPixelC[k] == 0.0) {
v->PSCL_FACTOR_CHROMA[k] = 0.0;
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
* dml_max3(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0) && v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
} else {
if (v->HRatioChroma[k] > 1.0) {
v->PSCL_FACTOR_CHROMA[k] = dml_min(
v->MaxDCHUBToPSCLThroughput,
v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
} else {
v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
}
v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
* dml_max5(
v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_FACTOR_CHROMA[k],
1.0);
if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0 || v->HTAPsChroma[k] > 6.0 || v->VTAPsChroma[k] > 6.0)
&& v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
}
}
}
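/*
 * Maximum swath width per plane: the smaller of the hardware swath limit for
 * the tiling/rotation/format case and what fits in the line buffer given the
 * vertical taps and ratio. 420 formats halve the chroma limit.
 */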
for (k = 0; k < v->NumberOfActivePlanes; k++) {
int MaximumSwathWidthSupportLuma;
int MaximumSwathWidthSupportChroma;
if (v->SurfaceTiling[k] == dm_sw_linear) {
MaximumSwathWidthSupportLuma = 8192.0;
} else if (v->SourceScan[k] == dm_vert && v->BytePerPixelC[k] > 0) {
MaximumSwathWidthSupportLuma = 2880.0;
} else if (v->SourcePixelFormat[k] == dm_rgbe_alpha) {
MaximumSwathWidthSupportLuma = 3840.0;
} else {
MaximumSwathWidthSupportLuma = 5760.0;
}
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma / 2.0;
} else {
MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma;
}
v->MaximumSwathWidthInLineBufferLuma = v->LineBufferSize * dml_max(v->HRatio[k], 1.0) / v->LBBitPerPixel[k]
/ (v->vtaps[k] + dml_max(dml_ceil(v->VRatio[k], 1.0) - 2, 0.0));
if (v->BytePerPixelC[k] == 0.0) {
v->MaximumSwathWidthInLineBufferChroma = 0;
} else {
v->MaximumSwathWidthInLineBufferChroma = v->LineBufferSize * dml_max(v->HRatioChroma[k], 1.0) / v->LBBitPerPixel[k]
/ (v->VTAPsChroma[k] + dml_max(dml_ceil(v->VRatioChroma[k], 1.0) - 2, 0.0));
}
v->MaximumSwathWidthLuma[k] = dml_min(MaximumSwathWidthSupportLuma, v->MaximumSwathWidthInLineBufferLuma);
v->MaximumSwathWidthChroma[k] = dml_min(MaximumSwathWidthSupportChroma, v->MaximumSwathWidthInLineBufferChroma);
}
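/*
 * First CalculateSwathAndDETConfiguration pass: the leading 'true' appears to
 * force single-DPP sizing, so this call is mainly used to establish
 * SingleDPPViewportSizeSupportPerPlane; the per-state pass is repeated
 * further down with the chosen DPP counts and ODM modes.
 */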
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->odm_combine_dummy,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->NoOfDPPThisState,
v->swath_width_luma_ub_this_state,
v->swath_width_chroma_ub_this_state,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i]
&& v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
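/*
 * Pick the ODM combine mode for this plane/state: honour an explicit policy
 * (outputs other than DP/DP2.0/eDP never combine), otherwise escalate from
 * no combine to 2:1 and then 4:1 whenever the required DISPCLK exceeds the
 * rounded-down maximum; the DSC image width and 420 FMT buffer width limits
 * below can force further escalation.
 */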
if (v->ODMCombinePolicy == dm_odm_combine_policy_none
|| !(v->Output[k] == dm_dp ||
v->Output[k] == dm_dp2p0 ||
v->Output[k] == dm_edp)) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH)
FMTBufferExceeded = true;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_2to1) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else if (v->ODMCombinePolicy == dm_odm_combine_policy_4to1
|| v->PlaneRequiredDISPCLKWithODMCombine2To1 > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
}
if (v->DSCEnabled[k] && v->HActive[k] > DCN314_MAX_DSC_IMAGE_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN314_MAX_DSC_IMAGE_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->Output[k] == dm_hdmi) {
FMTBufferExceeded = true;
} else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
if (v->HActive[k] / 4 > DCN314_MAX_FMT_420_BUFFER_WIDTH)
FMTBufferExceeded = true;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 4;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 4;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2;
} else if ((v->WhenToDoMPCCombine == dm_mpc_never
|| (v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
<= v->MaxDppclkRoundedDownToDFSGranularity && v->SingleDPPViewportSizeSupportPerPlane[k] == true))) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
} else {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity)
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
if (v->NoOfDPP[i][j][k] == 1)
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] + 1;
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10
|| v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha)
NoChroma = false;
}
// UPTO
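/*
 * In the j == 1 configuration, when MPC combine is allowed and unbounded
 * requesting is not in use, keep splitting the single-DPP plane with the
 * highest read bandwidth across two DPPs until the DPP budget is reached or
 * no single-DPP planes remain.
 */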
if (j == 1 && v->WhenToDoMPCCombine != dm_mpc_never
&& !UnboundedRequest(v->UseUnboundedRequesting, v->TotalNumberOfActiveDPP[i][j], NoChroma, v->Output[0])) {
while (!(v->TotalNumberOfActiveDPP[i][j] >= v->MaxNumDPP || v->TotalNumberOfSingleDPPPlanes[i][j] == 0)) {
double BWOfNonSplitPlaneOfMaximumBandwidth;
unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth;
BWOfNonSplitPlaneOfMaximumBandwidth = 0;
NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k] > BWOfNonSplitPlaneOfMaximumBandwidth
&& v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled && v->MPCCombine[i][j][k] == false) {
BWOfNonSplitPlaneOfMaximumBandwidth = v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
NumberOfNonSplitPlaneOfMaximumBandwidth = k;
}
}
v->MPCCombine[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = true;
v->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
v->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] =
v->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
* (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + 1;
v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] - 1;
}
}
if (v->TotalNumberOfActiveDPP[i][j] > v->MaxNumDPP) {
v->RequiredDISPCLK[i][j] = 0.0;
v->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (v->SingleDPPViewportSizeSupportPerPlane[k] == false && v->WhenToDoMPCCombine != dm_mpc_never) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k]
* (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
} else {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 1;
v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k]
* (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
if (!(v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
} else {
v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
> v->MaxDppclkRoundedDownToDFSGranularity)
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->WritebackRequiredDISPCLK);
if (v->MaxDispclkRoundedDownToDFSGranularity < v->WritebackRequiredDISPCLK) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
}
}
/*Total Available Pipes Support Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
} else {
v->TotalAvailablePipesSupport[i][j] = false;
}
}
}
/*Display IO and DSC Support Check*/
v->NonsupportedDSCInputBPC = false;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (!(v->DSCInputBitPerComponent[k] == 12.0 || v->DSCInputBitPerComponent[k] == 10.0 || v->DSCInputBitPerComponent[k] == 8.0)
|| v->DSCInputBitPerComponent[k] > v->MaximumDSCBitsPerComponent) {
v->NonsupportedDSCInputBPC = true;
}
}
/*Number Of DSC Slices*/
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
if (v->PixelClockBackEnd[k] > 3200) {
v->NumberOfDSCSlices[k] = dml_ceil(v->PixelClockBackEnd[k] / 400.0, 4.0);
} else if (v->PixelClockBackEnd[k] > 1360) {
v->NumberOfDSCSlices[k] = 8;
} else if (v->PixelClockBackEnd[k] > 680) {
v->NumberOfDSCSlices[k] = 4;
} else if (v->PixelClockBackEnd[k] > 340) {
v->NumberOfDSCSlices[k] = 2;
} else {
v->NumberOfDSCSlices[k] = 1;
}
} else {
v->NumberOfDSCSlices[k] = 0;
}
}
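/*
 * Output bpp per state for planes that own their timing (BlendingAndTiming[k]
 * == k): HDMI uses 3 lanes at min(600, PHYCLK[i]) x 10; dp2p0 walks
 * UHBR10/13.5/20, retrying each rate with DSC forced on before moving up;
 * other DP/eDP outputs walk HBR/HBR2/HBR3. FEC is required for DP links that
 * use DSC and always for dp2p0.
 */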
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
if (v->BlendingAndTiming[k] == k) {
if (v->Output[k] == dm_hdmi) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
v->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, v->PHYCLKPerState[i]) * 10,
3,
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
false,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
}
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
if (v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
}
}
if (v->Output[k] == dm_dp2p0) {
v->Outbpp = BPP_INVALID;
if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 10000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 10000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
}
if (v->Outbpp == BPP_INVALID &&
(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 13500,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 13500,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
}
if (v->Outbpp == BPP_INVALID &&
(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 20000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
v->ForcedOutputLinkBPP[k] == 0) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 20000,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
}
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
}
} else {
v->Outbpp = BPP_INVALID;
if (v->PHYCLKPerState[i] >= 270.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 2700,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 5400,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
}
if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
v->Outbpp = TruncToValidBPP(
(1.0 - v->Downspreading / 100.0) * 8100,
v->OutputLinkDPLanes[k],
v->HTotal[k],
v->HActive[k],
v->PixelClockBackEnd[k],
v->ForcedOutputLinkBPP[k],
v->LinkDSCEnable,
v->Output[k],
v->OutputFormat[k],
v->DSCInputBitPerComponent[k],
v->NumberOfDSCSlices[k],
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
v->OutputBppPerState[i][k] = v->Outbpp;
// TODO: Need some other way to handle this nonsense
// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
}
}
}
} else {
v->OutputBppPerState[i][k] = 0;
}
}
}
for (i = 0; i < v->soc.num_states; i++) {
v->LinkCapacitySupport[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->BlendingAndTiming[k] == k
&& (v->Output[k] == dm_dp ||
v->Output[k] == dm_edp ||
v->Output[k] == dm_hdmi) && v->OutputBppPerState[i][k] == 0) {
v->LinkCapacitySupport[i] = false;
}
}
}
// UPTO 2172
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k
&& (v->Output[k] == dm_dp ||
v->Output[k] == dm_edp ||
v->Output[k] == dm_hdmi)) {
if (v->OutputFormat[k] == dm_420 && v->Interlace[k] == 1 && v->ProgressiveToInterlaceUnitInOPP == true) {
P2IWith420 = true;
}
if (v->DSCEnable[k] == true && v->OutputFormat[k] == dm_n422
&& !v->DSC422NativeSupport) {
DSC422NativeNotSupported = true;
}
}
}
for (i = 0; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
&& (v->ODMCombine4To1Supported == false || v->Output[k] == dm_dp || v->Output[k] == dm_edp
|| v->Output[k] == dm_hdmi)) {
v->ODMCombine4To1SupportCheckOK[i] = false;
}
}
}
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
for (i = 0; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->RequiresDSC[i][k] == true) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 4.0;
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 2.0;
} else {
v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 1.0;
}
}
}
if (v->TotalDSCUnitsRequired > v->NumberOfDSC) {
v->NotEnoughDSCUnits[i] = true;
}
}
/*DSC Delay per state*/
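/*
 * DSCCE delay for the slice layout plus the fixed DSC delay, doubled for 2:1
 * ODM and quadrupled for 4:1 (each engine handles a fraction of the slices),
 * then rescaled from the back-end to the front-end pixel clock.
 */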
for (i = 0; i < v->soc.num_states; i++) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
} else {
v->BPP = v->OutputBppPerState[i][k];
}
if (v->RequiresDSC[i][k] == true && v->BPP != 0.0) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
v->DSCDelayPerState[i][k] = dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k],
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
v->DSCDelayPerState[i][k] = 2.0
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 2,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
} else {
v->DSCDelayPerState[i][k] = 4.0
* (dscceComputeDelay(
v->DSCInputBitPerComponent[k],
v->BPP,
dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
v->NumberOfDSCSlices[k] / 4,
v->OutputFormat[k],
v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
}
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
} else {
v->DSCDelayPerState[i][k] = 0.0;
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[k] == m && v->RequiresDSC[i][m] == true) {
v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][m];
}
}
}
}
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
v->SourcePixelFormat,
v->SurfaceTiling,
v->ViewportWidth,
v->ViewportHeight,
v->SurfaceWidthY,
v->SurfaceWidthC,
v->SurfaceHeightY,
v->SurfaceHeightC,
v->Read256BlockHeightY,
v->Read256BlockHeightC,
v->Read256BlockWidthY,
v->Read256BlockWidthC,
v->ODMCombineEnableThisState,
v->BlendingAndTiming,
v->BytePerPixelY,
v->BytePerPixelC,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->HActive,
v->HRatio,
v->HRatioChroma,
v->NoOfDPPThisState,
v->swath_width_luma_ub_this_state,
v->swath_width_chroma_ub_this_state,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->dummystring,
&v->ViewportSizeSupport[i][j]);
CalculateDCFCLKDeepSleep(
mode_lib,
v->NumberOfActivePlanes,
v->BytePerPixelY,
v->BytePerPixelC,
v->VRatio,
v->VRatioChroma,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->HRatio,
v->HRatioChroma,
v->PixelClock,
v->PSCL_FACTOR,
v->PSCL_FACTOR_CHROMA,
v->RequiredDPPCLKThisState,
v->ReadBandwidthLuma,
v->ReadBandwidthChroma,
v->ReturnBusWidth,
&v->ProjectedDCFCLKDeepSleep[i][j]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->swath_width_luma_ub_all_states[i][j][k] = v->swath_width_luma_ub_this_state[k];
v->swath_width_chroma_ub_all_states[i][j][k] = v->swath_width_chroma_ub_this_state[k];
v->SwathWidthYAllStates[i][j][k] = v->SwathWidthYThisState[k];
v->SwathWidthCAllStates[i][j][k] = v->SwathWidthCThisState[k];
v->SwathHeightYAllStates[i][j][k] = v->SwathHeightYThisState[k];
v->SwathHeightCAllStates[i][j][k] = v->SwathHeightCThisState[k];
v->DETBufferSizeYAllStates[i][j][k] = v->DETBufferSizeYThisState[k];
v->DETBufferSizeCAllStates[i][j][k] = v->DETBufferSizeCThisState[k];
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX];
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
}
v->TotalNumberOfDCCActiveDPP[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->DCCEnable[k] == true) {
v->TotalNumberOfDCCActiveDPP[i][j] = v->TotalNumberOfDCCActiveDPP[i][j] + v->NoOfDPP[i][j][k];
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10
|| v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12)
&& v->SourceScan[k] != dm_vert) {
v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma)
/ 2;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
}
v->PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightC[k],
v->Read256BlockWidthC[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelC[k],
v->SourceScan[k],
v->SwathWidthCThisState[k],
v->ViewportHeightChroma[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForChroma,
v->PitchC[k],
0.0,
&v->MacroTileWidthC[k],
&v->MetaRowBytesC,
&v->DPTEBytesPerRowC,
&v->PTEBufferSizeNotExceededC[i][j][k],
&v->dummyinteger7,
&v->dpte_row_height_chroma[k],
&v->dummyinteger28,
&v->dummyinteger26,
&v->dummyinteger23,
&v->meta_row_height_chroma[k],
&v->dummyinteger8,
&v->dummyinteger9,
&v->dummyinteger19,
&v->dummyinteger20,
&v->dummyinteger17,
&v->dummyinteger10,
&v->dummyinteger11);
v->PrefetchLinesC[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatioChroma[k],
v->VTAPsChroma[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightCThisState[k],
v->ViewportYStartC[k],
&v->PrefillC[k],
&v->MaxNumSwC[k]);
} else {
v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
v->PTEBufferSizeInRequestsForChroma = 0;
v->PDEAndMetaPTEBytesPerFrameC = 0.0;
v->MetaRowBytesC = 0.0;
v->DPTEBytesPerRowC = 0.0;
v->PrefetchLinesC[i][j][k] = 0.0;
v->PTEBufferSizeNotExceededC[i][j][k] = true;
}
v->PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
mode_lib,
v->DCCEnable[k],
v->Read256BlockHeightY[k],
v->Read256BlockWidthY[k],
v->SourcePixelFormat[k],
v->SurfaceTiling[k],
v->BytePerPixelY[k],
v->SourceScan[k],
v->SwathWidthYThisState[k],
v->ViewportHeight[k],
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMMinPageSize,
v->HostVMMinPageSize,
v->PTEBufferSizeInRequestsForLuma,
v->PitchY[k],
v->DCCMetaPitchY[k],
&v->MacroTileWidthY[k],
&v->MetaRowBytesY,
&v->DPTEBytesPerRowY,
&v->PTEBufferSizeNotExceededY[i][j][k],
&v->dummyinteger7,
&v->dpte_row_height[k],
&v->dummyinteger29,
&v->dummyinteger27,
&v->dummyinteger24,
&v->meta_row_height[k],
&v->dummyinteger25,
&v->dpte_group_bytes[k],
&v->dummyinteger21,
&v->dummyinteger22,
&v->dummyinteger18,
&v->dummyinteger5,
&v->dummyinteger6);
v->PrefetchLinesY[i][j][k] = CalculatePrefetchSourceLines(
mode_lib,
v->VRatio[k],
v->vtaps[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
v->SwathHeightYThisState[k],
v->ViewportYStartY[k],
&v->PrefillY[k],
&v->MaxNumSwY[k]);
v->PDEAndMetaPTEBytesPerFrame[i][j][k] = v->PDEAndMetaPTEBytesPerFrameY + v->PDEAndMetaPTEBytesPerFrameC;
v->MetaRowBytes[i][j][k] = v->MetaRowBytesY + v->MetaRowBytesC;
v->DPTEBytesPerRow[i][j][k] = v->DPTEBytesPerRowY + v->DPTEBytesPerRowC;
CalculateRowBandwidth(
v->GPUVMEnable,
v->SourcePixelFormat[k],
v->VRatio[k],
v->VRatioChroma[k],
v->DCCEnable[k],
v->HTotal[k] / v->PixelClock[k],
v->MetaRowBytesY,
v->MetaRowBytesC,
v->meta_row_height[k],
v->meta_row_height_chroma[k],
v->DPTEBytesPerRowY,
v->DPTEBytesPerRowC,
v->dpte_row_height[k],
v->dpte_row_height_chroma[k],
&v->meta_row_bandwidth[i][j][k],
&v->dpte_row_bandwidth[i][j][k]);
}
/*
* DCCMetaBufferSizeSupport(i, j) = True
* For k = 0 To NumberOfActivePlanes - 1
* If MetaRowBytes(i, j, k) > 24064 Then
* DCCMetaBufferSizeSupport(i, j) = False
* End If
* Next k
*/
v->DCCMetaBufferSizeSupport[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->MetaRowBytes[i][j][k] > 24064)
v->DCCMetaBufferSizeSupport[i][j] = false;
}
v->UrgLatency[i] = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClockPerState[i]);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatio[k],
v->VRatioChroma[k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursor[k],
&v->UrgentBurstFactorLuma[k],
&v->UrgentBurstFactorChroma[k],
&NotUrgentLatencyHiding[k]);
}
v->NotEnoughUrgentLatencyHidingA[i][j] = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (NotUrgentLatencyHiding[k]) {
v->NotEnoughUrgentLatencyHidingA[i][j] = true;
}
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->VActivePixelBandwidth[i][j][k] = v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k]
+ v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k];
v->VActiveCursorBandwidth[i][j][k] = v->cursor_bw[k] * v->UrgentBurstFactorCursor[k];
}
v->TotalVActivePixelBandwidth[i][j] = 0;
v->TotalVActiveCursorBandwidth[i][j] = 0;
v->TotalMetaRowBandwidth[i][j] = 0;
v->TotalDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalVActivePixelBandwidth[i][j] = v->TotalVActivePixelBandwidth[i][j] + v->VActivePixelBandwidth[i][j][k];
v->TotalVActiveCursorBandwidth[i][j] = v->TotalVActiveCursorBandwidth[i][j] + v->VActiveCursorBandwidth[i][j][k];
v->TotalMetaRowBandwidth[i][j] = v->TotalMetaRowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->meta_row_bandwidth[i][j][k];
v->TotalDPTERowBandwidth[i][j] = v->TotalDPTERowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->dpte_row_bandwidth[i][j][k];
}
}
}
//Calculate Return BW
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->BlendingAndTiming[k] == k) {
if (v->WritebackEnable[k] == true) {
v->WritebackDelayTime[k] = v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[k],
v->WritebackHRatio[k],
v->WritebackVRatio[k],
v->WritebackVTaps[k],
v->WritebackDestinationWidth[k],
v->WritebackDestinationHeight[k],
v->WritebackSourceHeight[k],
v->HTotal[k]) / v->RequiredDISPCLK[i][j];
} else {
v->WritebackDelayTime[k] = 0.0;
}
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[m] == k && v->WritebackEnable[m] == true) {
v->WritebackDelayTime[k] = dml_max(
v->WritebackDelayTime[k],
v->WritebackLatency
+ CalculateWriteBackDelay(
v->WritebackPixelFormat[m],
v->WritebackHRatio[m],
v->WritebackVRatio[m],
v->WritebackVTaps[m],
v->WritebackDestinationWidth[m],
v->WritebackDestinationHeight[m],
v->WritebackSourceHeight[m],
v->HTotal[m]) / v->RequiredDISPCLK[i][j]);
}
}
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
for (m = 0; m < v->NumberOfActivePlanes; m++) {
if (v->BlendingAndTiming[k] == m) {
v->WritebackDelayTime[k] = v->WritebackDelayTime[m];
}
}
}
v->MaxMaxVStartup[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->MaximumVStartup[i][j][k] =
CalculateMaxVStartup(
v->VTotal[k],
v->VActive[k],
v->VBlankNom[k],
v->HTotal[k],
v->PixelClock[k],
v->ProgressiveToInterlaceUnitInOPP,
v->Interlace[k],
v->ip.VBlankNomDefaultUS,
v->WritebackDelayTime[k]);
v->MaxMaxVStartup[i][j] = dml_max(v->MaxMaxVStartup[i][j], v->MaximumVStartup[i][j][k]);
}
}
}
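/*
 * Worst-case reordering bytes: the largest per-channel out-of-order return
 * allowance across the three traffic types, times the number of channels.
 * This feeds the DCFCLK minimization, the extra-latency estimate and the
 * re-ordering buffer check below.
 */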
ReorderingBytes = v->NumberOfChannels
* dml_max3(
v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
}
if (v->UseMinimumRequiredDCFCLK == true)
UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes);
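/*
 * Return bandwidth per state: the ideal SDP/fabric and DRAM bandwidths are
 * each derated by their post-urgent-latency percentages and the smaller one
 * is taken; with HostVM enabled the pixel-mixed-with-VM derating is used
 * instead of the pixel-data-only one.
 */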
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double IdealFabricAndSDPPortBandwidthPerState = dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn);
double IdealDRAMBandwidthPerState = v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth;
double PixelDataOnlyReturnBWPerState = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly / 100.0);
double PixelMixedWithVMDataReturnBWPerState = dml_min(
IdealFabricAndSDPPortBandwidthPerState * v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
IdealDRAMBandwidthPerState * v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0);
if (v->HostVMEnable != true) {
v->ReturnBWPerState[i][j] = PixelDataOnlyReturnBWPerState;
} else {
v->ReturnBWPerState[i][j] = PixelMixedWithVMDataReturnBWPerState;
}
}
}
//Re-ordering Buffer Support Check
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
v->ROBSupport[i][j] = true;
} else {
v->ROBSupport[i][j] = false;
}
}
}
//Vertical Active BW support check
MaxTotalVActiveRDBandwidth = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn)
* v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth
* v->MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100);
if (MaxTotalVActiveRDBandwidth <= v->MaxTotalVerticalActiveAvailableBandwidth[i][j]) {
v->TotalVerticalActiveBandwidthSupport[i][j] = true;
} else {
v->TotalVerticalActiveBandwidthSupport[i][j] = false;
}
}
}
v->UrgentLatency = CalculateUrgentLatency(
v->UrgentLatencyPixelDataOnly,
v->UrgentLatencyPixelMixedWithVMData,
v->UrgentLatencyVMDataOnly,
v->DoUrgentLatencyAdjustment,
v->UrgentLatencyAdjustmentFabricClockComponent,
v->UrgentLatencyAdjustmentFabricClockReference,
v->FabricClock);
//Prefetch Check
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double VMDataOnlyReturnBWPerState;
double HostVMInefficiencyFactor = 1;
int NextPrefetchModeState = MinPrefetchMode;
bool UnboundedRequestEnabledThisState = false;
int CompressedBufferSizeInkByteThisState = 0;
double dummy;
v->TimeCalc = 24 / v->ProjectedDCFCLKDeepSleep[i][j];
v->BandwidthWithoutPrefetchSupported[i][j] = true;
if (v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j]
+ v->TotalDPTERowBandwidth[i][j] > v->ReturnBWPerState[i][j] || v->NotEnoughUrgentLatencyHidingA[i][j]) {
v->BandwidthWithoutPrefetchSupported[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
}
VMDataOnlyReturnBWPerState = dml_min(
dml_min(
v->ReturnBusWidth * v->DCFCLKState[i][j],
v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn)
* v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0,
v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth
* v->PercentOfIdealDRAMBWReceivedAfterUrgLatencyVMDataOnly / 100.0);
if (v->GPUVMEnable && v->HostVMEnable)
HostVMInefficiencyFactor = v->ReturnBWPerState[i][j] / VMDataOnlyReturnBWPerState;
v->ExtraLatency = CalculateExtraLatency(
v->RoundTripPingLatencyCycles,
ReorderingBytes,
v->DCFCLKState[i][j],
v->TotalNumberOfActiveDPP[i][j],
v->PixelChunkSizeInKByte,
v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize,
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
v->NoOfDPPThisState,
v->dpte_group_bytes,
HostVMInefficiencyFactor,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
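/*
 * Prefetch retry loop: start at the minimum prefetch mode with the largest
 * vstartup and recompute the schedule; while the VM/row line counts are too
 * large, walk NextMaxVStartup down, otherwise reset vstartup and advance the
 * prefetch mode, until a supported combination is found or both knobs are
 * exhausted (see the while condition below).
 */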
do {
v->PrefetchModePerState[i][j] = NextPrefetchModeState;
v->MaxVStartup = v->NextMaxVStartup;
v->TWait = CalculateTWait(
v->PrefetchModePerState[i][j],
v->DRAMClockChangeLatency,
v->UrgLatency[i],
v->SREnterPlusExitTime);
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculatePrefetchSchedulePerPlane(mode_lib,
HostVMInefficiencyFactor,
i, j, k);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateUrgentBurstFactor(
v->swath_width_luma_ub_this_state[k],
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->HTotal[k] / v->PixelClock[k],
v->UrgLatency[i],
v->CursorBufferSize,
v->CursorWidth[k][0],
v->CursorBPP[k][0],
v->VRatioPreY[i][j][k],
v->VRatioPreC[i][j][k],
v->BytePerPixelInDETY[k],
v->BytePerPixelInDETC[k],
v->DETBufferSizeYThisState[k],
v->DETBufferSizeCThisState[k],
&v->UrgentBurstFactorCursorPre[k],
&v->UrgentBurstFactorLumaPre[k],
&v->UrgentBurstFactorChromaPre[k],
&v->NotUrgentLatencyHidingPre[k]);
}
v->MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0
/ (v->HTotal[k] / v->PixelClock[k]) * v->VRatioPreY[i][j][k];
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
+ dml_max3(
v->VActivePixelBandwidth[i][j][k]
+ v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->NotEnoughUrgentLatencyHidingPre = false;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->NotUrgentLatencyHidingPre[k] == true) {
v->NotEnoughUrgentLatencyHidingPre = true;
}
}
v->PrefetchSupported[i][j] = true;
if (v->BandwidthWithoutPrefetchSupported[i][j] == false || v->MaximumReadBandwidthWithPrefetch > v->ReturnBWPerState[i][j]
|| v->NotEnoughUrgentLatencyHidingPre == 1) {
v->PrefetchSupported[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->LineTimesForPrefetch[k] < 2.0 || v->LinesForMetaPTE[k] >= 32.0 || v->LinesForMetaAndDPTERow[k] >= 16.0
|| v->NoTimeForPrefetch[i][j][k] == true) {
v->PrefetchSupported[i][j] = false;
}
}
v->DynamicMetadataSupported[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->NoTimeForDynamicMetadata[i][j][k] == true) {
v->DynamicMetadataSupported[i][j] = false;
}
}
v->VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->VRatioPreY[i][j][k] > 4.0 || v->VRatioPreC[i][j][k] > 4.0 || v->NoTimeForPrefetch[i][j][k] == true) {
v->VRatioInPrefetchSupported[i][j] = false;
}
}
v->AnyLinesForVMOrRowTooLarge = false;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->LinesForMetaAndDPTERow[k] >= 16 || v->LinesForMetaPTE[k] >= 32) {
v->AnyLinesForVMOrRowTooLarge = true;
}
}
v->NextPrefetchMode = v->NextPrefetchMode + 1;
if (v->PrefetchSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true) {
v->BandwidthAvailableForImmediateFlip = v->ReturnBWPerState[i][j];
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
- dml_max(
v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
+ v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ dml_max3(
v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
v->NoOfDPP[i][j][k] * v->final_flip_bw[k] + v->VActivePixelBandwidth[i][j][k]
+ v->VActiveCursorBandwidth[i][j][k],
v->NoOfDPP[i][j][k]
* (v->final_flip_bw[k]
+ v->RequiredPrefetchPixelDataBWLuma[i][j][k]
* v->UrgentBurstFactorLumaPre[k]
+ v->RequiredPrefetchPixelDataBWChroma[i][j][k]
* v->UrgentBurstFactorChromaPre[k])
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
}
v->ImmediateFlipSupportedForState[i][j] = true;
if (v->total_dcn_read_bw_with_flip > v->ReturnBWPerState[i][j]) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ImmediateFlipSupportedForPipe[k] == false) {
v->ImmediateFlipSupportedForState[i][j] = false;
}
}
} else {
v->ImmediateFlipSupportedForState[i][j] = false;
}
if (v->MaxVStartup <= __DML_VBA_MIN_VSTARTUP__ || v->AnyLinesForVMOrRowTooLarge == false) {
v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
NextPrefetchModeState = NextPrefetchModeState + 1;
} else {
v->NextMaxVStartup = v->NextMaxVStartup - 1;
}
v->NextPrefetchMode = v->NextPrefetchMode + 1;
} while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
&& ((v->HostVMEnable == false &&
v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true))
|| (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode)));
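/*
 * With the prefetch mode for this state settled, size the unbounded request /
 * compressed buffer configuration and derive the watermarks and DRAM clock
 * change support for state i, configuration j.
 */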
CalculateUnboundedRequestAndCompressedBufferSize(
v->DETBufferSizeInKByte[0],
v->ConfigReturnBufferSizeInKByte,
v->UseUnboundedRequesting,
v->TotalNumberOfActiveDPP[i][j],
NoChroma,
v->MaxNumDPP,
v->CompressedBufferSegmentSizeInkByte,
v->Output,
&UnboundedRequestEnabledThisState,
&CompressedBufferSizeInkByteThisState);
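/* Watermarks and DRAM clock change support for this state and MPC combine option */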
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->UrgLatency[i],
v->ExtraLatency,
v->SOCCLKPerState[i],
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
&dummy,
&dummy,
&dummy,
&dummy);
}
}
/*PTE Buffer Size Check*/
for (i = 0; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->PTEBufferSizeNotExceededY[i][j][k] == false || v->PTEBufferSizeNotExceededC[i][j][k] == false) {
v->PTEBufferSizeNotExceeded[i][j] = false;
}
}
}
}
/*Cursor Support Check*/
v->CursorSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->CursorWidth[k][0] > 0.0) {
if (v->CursorBPP[k][0] == 64 && v->Cursor64BppSupport == false) {
v->CursorSupport = false;
}
}
}
/*Valid Pitch Check*/
v->PitchSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->AlignedYPitch[k] = dml_ceil(dml_max(v->PitchY[k], v->SurfaceWidthY[k]), v->MacroTileWidthY[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchY[k] = dml_ceil(dml_max(v->DCCMetaPitchY[k], v->SurfaceWidthY[k]), 64.0 * v->Read256BlockWidthY[k]);
} else {
v->AlignedDCCMetaPitchY[k] = v->DCCMetaPitchY[k];
}
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
&& v->SourcePixelFormat[k] != dm_mono_16 && v->SourcePixelFormat[k] != dm_rgbe
&& v->SourcePixelFormat[k] != dm_mono_8) {
v->AlignedCPitch[k] = dml_ceil(dml_max(v->PitchC[k], v->SurfaceWidthC[k]), v->MacroTileWidthC[k]);
if (v->DCCEnable[k] == true) {
v->AlignedDCCMetaPitchC[k] = dml_ceil(
dml_max(v->DCCMetaPitchC[k], v->SurfaceWidthC[k]),
64.0 * v->Read256BlockWidthC[k]);
} else {
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
} else {
v->AlignedCPitch[k] = v->PitchC[k];
v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
}
if (v->AlignedYPitch[k] > v->PitchY[k] || v->AlignedCPitch[k] > v->PitchC[k]
|| v->AlignedDCCMetaPitchY[k] > v->DCCMetaPitchY[k] || v->AlignedDCCMetaPitchC[k] > v->DCCMetaPitchC[k]) {
v->PitchSupport = false;
}
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) {
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8
&& v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k]
|| v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
}
}
}
/*Mode Support, Voltage State and SOC Configuration*/
for (i = v->soc.num_states - 1; i >= 0; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == true && v->SourceFormatPixelAndScanSupport == true && v->ViewportSizeSupport[i][j] == true
&& v->LinkCapacitySupport[i] == true && !P2IWith420 && !DSCOnlyIfNecessaryWithBPP
&& !DSC422NativeNotSupported && v->ODMCombine4To1SupportCheckOK[i] == true && v->NotEnoughDSCUnits[i] == false
&& v->DTBCLKRequiredMoreThanSupported[i] == false
&& v->ROBSupport[i][j] == true && v->DISPCLK_DPPCLK_Support[i][j] == true
&& v->TotalAvailablePipesSupport[i][j] == true && EnoughWritebackUnits == true
&& v->WritebackLatencySupport == true && v->WritebackScaleRatioAndTapsSupport == true
&& v->CursorSupport == true && v->PitchSupport == true && ViewportExceedsSurface == false
&& v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true
&& v->TotalVerticalActiveBandwidthSupport[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
&& v->PTEBufferSizeNotExceeded[i][j] == true && v->NonsupportedDSCInputBPC == false
&& ((v->HostVMEnable == false
&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
|| v->ImmediateFlipSupportedForState[i][j] == true)
&& FMTBufferExceeded == false) {
v->ModeSupport[i][j] = true;
} else {
v->ModeSupport[i][j] = false;
}
}
}
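/* Record the first failing check per state for validation reporting */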
for (i = v->soc.num_states; i >= 0; i--) {
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
if (!v->ScaleRatioAndTapsSupport) {
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (!v->SourceFormatPixelAndScanSupport) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
} else if (!v->ViewportSizeSupport[i][j]) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (P2IWith420) {
status = DML_FAIL_P2I_WITH_420;
} else if (DSCOnlyIfNecessaryWithBPP) {
status = DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP;
} else if (DSC422NativeNotSupported) {
status = DML_FAIL_NOT_DSC422_NATIVE;
} else if (!v->ODMCombine4To1SupportCheckOK[i]) {
status = DML_FAIL_ODM_COMBINE4TO1;
} else if (v->NotEnoughDSCUnits[i]) {
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (!v->ROBSupport[i][j]) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (!v->DISPCLK_DPPCLK_Support[i][j]) {
status = DML_FAIL_DISPCLK_DPPCLK;
} else if (!v->TotalAvailablePipesSupport[i][j]) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
} else if (!EnoughWritebackUnits) {
status = DML_FAIL_ENOUGH_WRITEBACK_UNITS;
} else if (!v->WritebackLatencySupport) {
status = DML_FAIL_WRITEBACK_LATENCY;
} else if (!v->WritebackScaleRatioAndTapsSupport) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
} else if (!v->CursorSupport) {
status = DML_FAIL_CURSOR_SUPPORT;
} else if (!v->PitchSupport) {
status = DML_FAIL_PITCH_SUPPORT;
} else if (ViewportExceedsSurface) {
status = DML_FAIL_VIEWPORT_EXCEEDS_SURFACE;
} else if (!v->PrefetchSupported[i][j]) {
status = DML_FAIL_PREFETCH_SUPPORT;
} else if (!v->DynamicMetadataSupported[i][j]) {
status = DML_FAIL_DYNAMIC_METADATA;
} else if (!v->TotalVerticalActiveBandwidthSupport[i][j]) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (!v->VRatioInPrefetchSupported[i][j]) {
status = DML_FAIL_V_RATIO_PREFETCH;
} else if (!v->PTEBufferSizeNotExceeded[i][j]) {
status = DML_FAIL_PTE_BUFFER_SIZE;
} else if (v->NonsupportedDSCInputBPC) {
status = DML_FAIL_DSC_INPUT_BPC;
} else if ((v->HostVMEnable
&& !v->ImmediateFlipSupportedForState[i][j])) {
status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
} else if (FMTBufferExceeded) {
status = DML_FAIL_FMT_BUFFER_EXCEEDED;
}
mode_lib->vba.ValidationStatus[i] = status;
}
}
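/* Select the lowest voltage state that supports the mode (default to num_states if none); prefer no MPC combine */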
{
unsigned int MaximumMPCCombine = 0;
for (i = v->soc.num_states; i >= 0; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
if (v->ModeSupport[i][0] == true) {
MaximumMPCCombine = 0;
} else {
MaximumMPCCombine = 1;
}
}
}
v->ImmediateFlipSupport = v->ImmediateFlipSupportedForState[v->VoltageLevel][MaximumMPCCombine];
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->MPCCombineEnable[k] = v->MPCCombine[v->VoltageLevel][MaximumMPCCombine][k];
v->DPPPerPlane[k] = v->NoOfDPP[v->VoltageLevel][MaximumMPCCombine][k];
}
v->DCFCLK = v->DCFCLKState[v->VoltageLevel][MaximumMPCCombine];
v->DRAMSpeed = v->DRAMSpeedPerState[v->VoltageLevel];
v->FabricClock = v->FabricClockPerState[v->VoltageLevel];
v->SOCCLK = v->SOCCLKPerState[v->VoltageLevel];
v->ReturnBW = v->ReturnBWPerState[v->VoltageLevel][MaximumMPCCombine];
v->maxMpcComb = MaximumMPCCombine;
}
}
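/*
 * Compute urgent, DRAM-clock-change and stutter watermarks and classify
 * whether a DRAM clock change can be hidden in vactive, in vblank, or is
 * not supported for this configuration.
 */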
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
double DCFCLK,
double ReturnBW,
double UrgentLatency,
double ExtraLatency,
double SOCCLK,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETC;
unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
unsigned int LinesInDETCRoundedDownToSwath;
double FullDETBufferingTimeY;
double FullDETBufferingTimeC;
double ActiveDRAMClockChangeLatencyMarginY;
double ActiveDRAMClockChangeLatencyMarginC;
double WritebackDRAMClockChangeLatencyMargin;
double PlaneWithMinActiveDRAMClockChangeMargin;
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
double WritebackDRAMClockChangeLatencyHiding;
double TotalPixelBW = 0.0;
int k, j;
v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
/ (v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (v->WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
/ (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
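/* Find the smallest active DRAM clock change margin and the OTG that owns it */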
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
v->TotalNumberOfActiveOTG = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
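/* A positive margin with prefetch mode 0 allows the DRAM clock change in vactive; otherwise try vblank */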
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
*StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, *StutterEnterPlusExitWatermark);
dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, *Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, *Z8StutterEnterPlusExitWatermark);
#endif
}
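/*
 * Minimum DCFCLK for deep sleep: each plane must still meet its pixel
 * delivery time, and the aggregate read bandwidth must fit the return bus.
 */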
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
int BytePerPixelY[],
int BytePerPixelC[],
double VRatio[],
double VRatioChroma[],
double SwathWidthY[],
double SwathWidthC[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
double ReadBandwidthLuma[],
double ReadBandwidthChroma[],
int ReturnBusWidth,
double *DCFCLKDeepSleep)
{
struct vba_vars_st *v = &mode_lib->vba;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
double ReadBandwidth = 0.0;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (BytePerPixelC[k] > 0) {
v->DCFCLKDeepSleepPerPlane[k] = dml_max(__DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma,
__DML_MIN_DCFCLK_FACTOR__ * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
} else {
v->DCFCLKDeepSleepPerPlane[k] = __DML_MIN_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
}
v->DCFCLKDeepSleepPerPlane[k] = dml_max(v->DCFCLKDeepSleepPerPlane[k], PixelClock[k] / 16);
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
*DCFCLKDeepSleep = dml_max(8.0, __DML_MIN_DCFCLK_FACTOR__ * ReadBandwidth / ReturnBusWidth);
for (k = 0; k < NumberOfActivePlanes; ++k) {
*DCFCLKDeepSleep = dml_max(*DCFCLKDeepSleep, v->DCFCLKDeepSleepPerPlane[k]);
}
}
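/*
 * Urgent burst factors: ratio of buffered time (cursor, luma DET, chroma DET)
 * to the buffered time left after hiding the urgent latency.  Flags the case
 * where the latency cannot be hidden at all.
 */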
static void CalculateUrgentBurstFactor(
int swath_width_luma_ub,
int swath_width_chroma_ub,
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double LineTime,
double UrgentLatency,
double CursorBufferSize,
unsigned int CursorWidth,
unsigned int CursorBPP,
double VRatio,
double VRatioC,
double BytePerPixelInDETY,
double BytePerPixelInDETC,
double DETBufferSizeY,
double DETBufferSizeC,
double *UrgentBurstFactorCursor,
double *UrgentBurstFactorLuma,
double *UrgentBurstFactorChroma,
bool *NotEnoughUrgentLatencyHiding)
{
double LinesInDETLuma;
double LinesInDETChroma;
unsigned int LinesInCursorBuffer;
double CursorBufferSizeInTime;
double DETBufferSizeInTimeLuma;
double DETBufferSizeInTimeChroma;
*NotEnoughUrgentLatencyHiding = 0;
if (CursorWidth > 0) {
LinesInCursorBuffer = 1 << (unsigned int) dml_floor(dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
if (VRatio > 0) {
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorCursor = 0;
} else {
*UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
}
} else {
*UrgentBurstFactorCursor = 1;
}
}
LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / swath_width_luma_ub;
if (VRatio > 0) {
DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorLuma = 0;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
} else {
*UrgentBurstFactorLuma = 1;
}
if (BytePerPixelInDETC > 0) {
LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / swath_width_chroma_ub;
/* chroma DET buffering is paced by the chroma VRatio */
if (VRatioC > 0) {
DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / VRatioC;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
*UrgentBurstFactorChroma = 0;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
}
} else {
*UrgentBurstFactorChroma = 1;
}
}
}
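/*
 * Per-plane line, request and cursor delivery times for both active and
 * prefetch phases, split into luma and chroma.
 */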
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
double VRatioChroma[],
double VRatioPrefetchY[],
double VRatioPrefetchC[],
unsigned int swath_width_luma_ub[],
unsigned int swath_width_chroma_ub[],
unsigned int DPPPerPlane[],
double HRatio[],
double HRatioChroma[],
double PixelClock[],
double PSCL_THROUGHPUT[],
double PSCL_THROUGHPUT_CHROMA[],
double DPPCLK[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
unsigned int NumberOfCursors[],
unsigned int CursorWidth[][DC__NUM_CURSOR__MAX],
unsigned int CursorBPP[][DC__NUM_CURSOR__MAX],
unsigned int BlockWidth256BytesY[],
unsigned int BlockHeight256BytesY[],
unsigned int BlockWidth256BytesC[],
unsigned int BlockHeight256BytesC[],
double DisplayPipeLineDeliveryTimeLuma[],
double DisplayPipeLineDeliveryTimeChroma[],
double DisplayPipeLineDeliveryTimeLumaPrefetch[],
double DisplayPipeLineDeliveryTimeChromaPrefetch[],
double DisplayPipeRequestDeliveryTimeLuma[],
double DisplayPipeRequestDeliveryTimeChroma[],
double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
double CursorRequestDeliveryTime[],
double CursorRequestDeliveryTimePrefetch[])
{
double req_per_swath_ub;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChroma[k] = 0;
} else {
if (VRatioChroma[k] <= 1) {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
if (VRatioPrefetchY[k] <= 1) {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
}
if (BytePerPixelC[k] == 0) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (VRatioPrefetchC[k] <= 1) {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
} else {
DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
}
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
} else {
req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
}
DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
if (BytePerPixelC[k] == 0) {
DisplayPipeRequestDeliveryTimeChroma[k] = 0;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockWidth256BytesC[k];
} else {
req_per_swath_ub = swath_width_chroma_ub[k] / BlockHeight256BytesC[k];
}
DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : HRatio = %f\n", __func__, k, HRatio[k]);
dml_print("DML::%s: k=%d : VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%d : HRatioChroma = %f\n", __func__, k, HRatioChroma[k]);
dml_print("DML::%s: k=%d : VRatioChroma = %f\n", __func__, k, VRatioChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
dml_print("DML::%s: k=%d : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
#endif
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
int cursor_req_per_width;
cursor_req_per_width = dml_ceil(CursorWidth[k][0] * CursorBPP[k][0] / 256 / 8, 1);
if (NumberOfCursors[k] > 0) {
if (VRatio[k] <= 1) {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTime[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
if (VRatioPrefetchY[k] <= 1) {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
} else {
CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
}
} else {
CursorRequestDeliveryTime[k] = 0;
CursorRequestDeliveryTimePrefetch[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d : NumberOfCursors = %d\n", __func__, k, NumberOfCursors[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTime = %f\n", __func__, k, CursorRequestDeliveryTime[k]);
dml_print("DML::%s: k=%d : CursorRequestDeliveryTimePrefetch = %f\n", __func__, k, CursorRequestDeliveryTimePrefetch[k]);
#endif
}
}
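/*
 * Time available per DCC meta chunk and per PTE group when fetching rows
 * nominally, during vblank, and during an immediate flip.
 */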
static void CalculateMetaAndPTETimes(
int NumberOfActivePlanes,
bool GPUVMEnable,
int MetaChunkSize,
int MinMetaChunkSizeBytes,
int HTotal[],
double VRatio[],
double VRatioChroma[],
double DestinationLinesToRequestRowInVBlank[],
double DestinationLinesToRequestRowInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int BytePerPixelY[],
int BytePerPixelC[],
enum scan_direction_class SourceScan[],
int dpte_row_height[],
int dpte_row_height_chroma[],
int meta_row_width[],
int meta_row_width_chroma[],
int meta_row_height[],
int meta_row_height_chroma[],
int meta_req_width[],
int meta_req_width_chroma[],
int meta_req_height[],
int meta_req_height_chroma[],
int dpte_group_bytes[],
int PTERequestSizeY[],
int PTERequestSizeC[],
int PixelPTEReqWidthY[],
int PixelPTEReqHeightY[],
int PixelPTEReqWidthC[],
int PixelPTEReqHeightC[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
double DST_Y_PER_PTE_ROW_NOM_L[],
double DST_Y_PER_PTE_ROW_NOM_C[],
double DST_Y_PER_META_ROW_NOM_L[],
double DST_Y_PER_META_ROW_NOM_C[],
double TimePerMetaChunkNominal[],
double TimePerChromaMetaChunkNominal[],
double TimePerMetaChunkVBlank[],
double TimePerChromaMetaChunkVBlank[],
double TimePerMetaChunkFlip[],
double TimePerChromaMetaChunkFlip[],
double time_per_pte_group_nom_luma[],
double time_per_pte_group_vblank_luma[],
double time_per_pte_group_flip_luma[],
double time_per_pte_group_nom_chroma[],
double time_per_pte_group_vblank_chroma[],
double time_per_pte_group_flip_chroma[])
{
unsigned int meta_chunk_width;
unsigned int min_meta_chunk_width;
unsigned int meta_chunk_per_row_int;
unsigned int meta_row_remainder;
unsigned int meta_chunk_threshold;
unsigned int meta_chunks_per_row_ub;
unsigned int meta_chunk_width_chroma;
unsigned int min_meta_chunk_width_chroma;
unsigned int meta_chunk_per_row_int_chroma;
unsigned int meta_row_remainder_chroma;
unsigned int meta_chunk_threshold_chroma;
unsigned int meta_chunks_per_row_ub_chroma;
unsigned int dpte_group_width_luma;
unsigned int dpte_groups_per_row_luma_ub;
unsigned int dpte_group_width_chroma;
unsigned int dpte_groups_per_row_chroma_ub;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / VRatioChroma[k];
}
DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
if (BytePerPixelC[k] == 0) {
DST_Y_PER_META_ROW_NOM_C[k] = 0;
} else {
DST_Y_PER_META_ROW_NOM_C[k] = meta_row_height_chroma[k] / VRatioChroma[k];
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
meta_chunk_width = MetaChunkSize * 1024 * 256 / BytePerPixelY[k] / meta_row_height[k];
min_meta_chunk_width = MinMetaChunkSizeBytes * 256 / BytePerPixelY[k] / meta_row_height[k];
meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
meta_row_remainder = meta_row_width[k] % meta_chunk_width;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
} else {
meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height[k];
}
if (meta_row_remainder <= meta_chunk_threshold) {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
} else {
meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
}
TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
if (BytePerPixelC[k] == 0) {
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
} else {
meta_chunk_width_chroma = MetaChunkSize * 1024 * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
min_meta_chunk_width_chroma = MinMetaChunkSizeBytes * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
meta_chunk_per_row_int_chroma = (double) meta_row_width_chroma[k] / meta_chunk_width_chroma;
meta_row_remainder_chroma = meta_row_width_chroma[k] % meta_chunk_width_chroma;
if (SourceScan[k] != dm_vert) {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_width_chroma[k];
} else {
meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_height_chroma[k];
}
if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
} else {
meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
}
TimePerChromaMetaChunkNominal[k] = meta_row_height_chroma[k] / VRatioChroma[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
TimePerChromaMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
}
} else {
TimePerMetaChunkNominal[k] = 0;
TimePerMetaChunkVBlank[k] = 0;
TimePerMetaChunkFlip[k] = 0;
TimePerChromaMetaChunkNominal[k] = 0;
TimePerChromaMetaChunkVBlank[k] = 0;
TimePerChromaMetaChunkFlip[k] = 0;
}
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
if (SourceScan[k] != dm_vert) {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqWidthY[k];
} else {
dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(1.0 * dpte_row_width_luma_ub[k] / dpte_group_width_luma, 1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
time_per_pte_group_flip_luma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
if (BytePerPixelC[k] == 0) {
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
} else {
if (SourceScan[k] != dm_vert) {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqWidthC[k];
} else {
dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(1.0 * dpte_row_width_chroma_ub[k] / dpte_group_width_chroma, 1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_vblank_chroma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
time_per_pte_group_flip_chroma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
}
} else {
time_per_pte_group_nom_luma[k] = 0;
time_per_pte_group_vblank_luma[k] = 0;
time_per_pte_group_flip_luma[k] = 0;
time_per_pte_group_nom_chroma[k] = 0;
time_per_pte_group_vblank_chroma[k] = 0;
time_per_pte_group_flip_chroma[k] = 0;
}
}
}
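/*
 * Time available per VM group and per 64-byte VM request when fetching page
 * tables during vblank and during an immediate flip.
 */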
static void CalculateVMGroupAndRequestTimes(
unsigned int NumberOfActivePlanes,
bool GPUVMEnable,
unsigned int GPUVMMaxPageTableLevels,
unsigned int HTotal[],
int BytePerPixelC[],
double DestinationLinesToRequestVMInVBlank[],
double DestinationLinesToRequestVMInImmediateFlip[],
bool DCCEnable[],
double PixelClock[],
int dpte_row_width_luma_ub[],
int dpte_row_width_chroma_ub[],
int vm_group_bytes[],
unsigned int dpde0_bytes_per_frame_ub_l[],
unsigned int dpde0_bytes_per_frame_ub_c[],
int meta_pte_bytes_per_frame_ub_l[],
int meta_pte_bytes_per_frame_ub_c[],
double TimePerVMGroupVBlank[],
double TimePerVMGroupFlip[],
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
int num_group_per_lower_vm_stage;
int num_req_per_lower_vm_stage;
int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
} else {
if (BytePerPixelC[k] > 0) {
num_group_per_lower_vm_stage = 2 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
} else {
num_group_per_lower_vm_stage = 1 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
}
}
}
if (DCCEnable[k] == false) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (GPUVMMaxPageTableLevels == 1) {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
}
} else {
if (BytePerPixelC[k] > 0) {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64
+ meta_pte_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
} else {
num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + meta_pte_bytes_per_frame_ub_l[k] / 64;
}
}
}
TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k] / num_group_per_lower_vm_stage;
TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k] / num_req_per_lower_vm_stage;
if (GPUVMMaxPageTableLevels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
}
} else {
TimePerVMGroupVBlank[k] = 0;
TimePerVMGroupFlip[k] = 0;
TimePerVMRequestVBlank[k] = 0;
TimePerVMRequestFlip[k] = 0;
}
}
}
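/*
 * Stutter efficiency: how much of the frame the display engine can spend in
 * self-refresh, derived from how long the DET and compressed buffer can feed
 * the pipe between read bursts.
 */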
static void CalculateStutterEfficiency(
struct display_mode_lib *mode_lib,
int CompressedBufferSizeInkByte,
bool UnboundedRequestEnabled,
int ConfigReturnBufferSizeInKByte,
int MetaFIFOSizeInKEntries,
int ZeroSizeBufferEntries,
int NumberOfActivePlanes,
int ROBBufferSizeInKByte,
double TotalDataReadBandwidth,
double DCFCLK,
double ReturnBW,
double COMPBUF_RESERVED_SPACE_64B,
double COMPBUF_RESERVED_SPACE_ZS,
double SRExitTime,
double SRExitZ8Time,
bool SynchronizedVBlank,
double Z8StutterEnterPlusExitWatermark,
double StutterEnterPlusExitWatermark,
bool ProgressiveToInterlaceUnitInOPP,
bool Interlace[],
double MinTTUVBlank[],
int DPPPerPlane[],
unsigned int DETBufferSizeY[],
int BytePerPixelY[],
double BytePerPixelDETY[],
double SwathWidthY[],
int SwathHeightY[],
int SwathHeightC[],
double NetDCCRateLuma[],
double NetDCCRateChroma[],
double DCCFractionOfZeroSizeRequestsLuma[],
double DCCFractionOfZeroSizeRequestsChroma[],
int HTotal[],
int VTotal[],
double PixelClock[],
double VRatio[],
enum scan_direction_class SourceScan[],
int BlockHeight256BytesY[],
int BlockWidth256BytesY[],
int BlockHeight256BytesC[],
int BlockWidth256BytesC[],
int DCCYMaxUncompressedBlock[],
int DCCCMaxUncompressedBlock[],
int VActive[],
bool DCCEnable[],
bool WritebackEnable[],
double ReadBandwidthPlaneLuma[],
double ReadBandwidthPlaneChroma[],
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
double *StutterEfficiency,
int *NumberOfStutterBurstsPerFrame,
double *Z8StutterEfficiencyNotIncludingVBlank,
double *Z8StutterEfficiency,
int *Z8NumberOfStutterBurstsPerFrame,
double *StutterPeriod)
{
struct vba_vars_st *v = &mode_lib->vba;
double DETBufferingTimeY;
double SwathWidthYCriticalPlane = 0;
double VActiveTimeCriticalPlane = 0;
double FrameTimeCriticalPlane = 0;
int BytePerPixelYCriticalPlane = 0;
double LinesToFinishSwathTransferStutterCriticalPlane = 0;
double MinTTUVBlankCriticalPlane = 0;
double TotalCompressedReadBandwidth;
double TotalRowReadBandwidth;
double AverageDCCCompressionRate;
double EffectiveCompressedBufferSize;
double PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer;
double StutterBurstTime;
int TotalActiveWriteback;
double LinesInDETY;
double LinesInDETYRoundedDownToSwath;
double MaximumEffectiveCompressionLuma;
double MaximumEffectiveCompressionChroma;
double TotalZeroSizeRequestReadBandwidth;
double TotalZeroSizeCompressedReadBandwidth;
double AverageDCCZeroSizeFraction;
double AverageZeroSizeCompressionRate;
int TotalNumberOfActiveOTG = 0;
double LastStutterPeriod = 0.0;
double LastZ8StutterPeriod = 0.0;
int k;
TotalZeroSizeRequestReadBandwidth = 0;
TotalZeroSizeCompressedReadBandwidth = 0;
TotalRowReadBandwidth = 0;
TotalCompressedReadBandwidth = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (DCCEnable[k] == true) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesY[k] > SwathHeightY[k]) || (SourceScan[k] != dm_vert && BlockHeight256BytesY[k] > SwathHeightY[k])
|| DCCYMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionLuma = 2;
} else {
MaximumEffectiveCompressionLuma = 4;
}
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth + ReadBandwidthPlaneLuma[k] / dml_min(NetDCCRateLuma[k], MaximumEffectiveCompressionLuma);
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth + ReadBandwidthPlaneLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthPlaneLuma[k] * DCCFractionOfZeroSizeRequestsLuma[k] / MaximumEffectiveCompressionLuma;
if (ReadBandwidthPlaneChroma[k] > 0) {
if ((SourceScan[k] == dm_vert && BlockWidth256BytesC[k] > SwathHeightC[k])
|| (SourceScan[k] != dm_vert && BlockHeight256BytesC[k] > SwathHeightC[k]) || DCCCMaxUncompressedBlock[k] < 256) {
MaximumEffectiveCompressionChroma = 2;
} else {
MaximumEffectiveCompressionChroma = 4;
}
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth
+ ReadBandwidthPlaneChroma[k] / dml_min(NetDCCRateChroma[k], MaximumEffectiveCompressionChroma);
TotalZeroSizeRequestReadBandwidth = TotalZeroSizeRequestReadBandwidth + ReadBandwidthPlaneChroma[k] * DCCFractionOfZeroSizeRequestsChroma[k];
TotalZeroSizeCompressedReadBandwidth = TotalZeroSizeCompressedReadBandwidth
+ ReadBandwidthPlaneChroma[k] * DCCFractionOfZeroSizeRequestsChroma[k] / MaximumEffectiveCompressionChroma;
}
} else {
TotalCompressedReadBandwidth = TotalCompressedReadBandwidth + ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
}
TotalRowReadBandwidth = TotalRowReadBandwidth + DPPPerPlane[k] * (meta_row_bw[k] + dpte_row_bw[k]);
}
AverageDCCCompressionRate = TotalDataReadBandwidth / TotalCompressedReadBandwidth;
AverageDCCZeroSizeFraction = TotalZeroSizeRequestReadBandwidth / TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, TotalCompressedReadBandwidth);
dml_print("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, TotalZeroSizeRequestReadBandwidth);
dml_print("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, TotalZeroSizeCompressedReadBandwidth);
dml_print("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, MaximumEffectiveCompressionLuma);
dml_print("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, MaximumEffectiveCompressionChroma);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, AverageDCCZeroSizeFraction);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, CompressedBufferSizeInkByte);
#endif
if (AverageDCCZeroSizeFraction == 1) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth / TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = MetaFIFOSizeInKEntries * 1024 * 64 * AverageZeroSizeCompressionRate + (ZeroSizeBufferEntries - COMPBUF_RESERVED_SPACE_ZS) * 64 * AverageZeroSizeCompressionRate;
} else if (AverageDCCZeroSizeFraction > 0) {
AverageZeroSizeCompressionRate = TotalZeroSizeRequestReadBandwidth / TotalZeroSizeCompressedReadBandwidth;
EffectiveCompressedBufferSize = dml_min(
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
MetaFIFOSizeInKEntries * 1024 * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate + 1 / AverageDCCCompressionRate))
+ dml_min((ROBBufferSizeInKByte * 1024 - COMPBUF_RESERVED_SPACE_64B * 64) * AverageDCCCompressionRate,
(ZeroSizeBufferEntries - COMPBUF_RESERVED_SPACE_ZS) * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
dml_print("DML::%s: min 1 = %f\n", __func__, CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print(
"DML::%s: min 2 = %f\n",
__func__,
MetaFIFOSizeInKEntries * 1024 * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate + 1 / AverageDCCCompressionRate));
dml_print("DML::%s: min 3 = %f\n", __func__, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 4 = %f\n", __func__, ZeroSizeBufferEntries * 64 / (AverageDCCZeroSizeFraction / AverageZeroSizeCompressionRate));
} else {
EffectiveCompressedBufferSize = dml_min(
CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate,
MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate) + (ROBBufferSizeInKByte * 1024 - COMPBUF_RESERVED_SPACE_64B * 64) * AverageDCCCompressionRate;
dml_print("DML::%s: min 1 = %f\n", __func__, CompressedBufferSizeInkByte * 1024 * AverageDCCCompressionRate);
dml_print("DML::%s: min 2 = %f\n", __func__, MetaFIFOSizeInKEntries * 1024 * 64 * AverageDCCCompressionRate);
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaFIFOSizeInKEntries = %d\n", __func__, MetaFIFOSizeInKEntries);
dml_print("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, AverageZeroSizeCompressionRate);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
#endif
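/* Stutter period is the shortest DET (plus compressed buffer share) buffering time across planes */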
*StutterPeriod = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
LinesInDETY = (DETBufferSizeY[k] + (UnboundedRequestEnabled == true ? EffectiveCompressedBufferSize : 0) * ReadBandwidthPlaneLuma[k] / TotalDataReadBandwidth)
/ BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath = dml_floor(LinesInDETY, SwathHeightY[k]);
DETBufferingTimeY = LinesInDETYRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatio[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeY = %f\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%0d SwathWidthY = %f\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%0d ReadBandwidthPlaneLuma = %f\n", __func__, k, ReadBandwidthPlaneLuma[k]);
dml_print("DML::%s: k=%0d TotalDataReadBandwidth = %f\n", __func__, k, TotalDataReadBandwidth);
dml_print("DML::%s: k=%0d LinesInDETY = %f\n", __func__, k, LinesInDETY);
dml_print("DML::%s: k=%0d LinesInDETYRoundedDownToSwath = %f\n", __func__, k, LinesInDETYRoundedDownToSwath);
dml_print("DML::%s: k=%0d HTotal = %d\n", __func__, k, HTotal[k]);
dml_print("DML::%s: k=%0d PixelClock = %f\n", __func__, k, PixelClock[k]);
dml_print("DML::%s: k=%0d VRatio = %f\n", __func__, k, VRatio[k]);
dml_print("DML::%s: k=%0d DETBufferingTimeY = %f\n", __func__, k, DETBufferingTimeY);
dml_print("DML::%s: k=%0d PixelClock = %f\n", __func__, k, PixelClock[k]);
#endif
if (k == 0 || DETBufferingTimeY < *StutterPeriod) {
bool isInterlaceTiming = Interlace[k] && !ProgressiveToInterlaceUnitInOPP;
*StutterPeriod = DETBufferingTimeY;
FrameTimeCriticalPlane = (isInterlaceTiming ? dml_floor(VTotal[k] / 2.0, 1.0) : VTotal[k]) * HTotal[k] / PixelClock[k];
VActiveTimeCriticalPlane = (isInterlaceTiming ? dml_floor(VActive[k] / 2.0, 1.0) : VActive[k]) * HTotal[k] / PixelClock[k];
BytePerPixelYCriticalPlane = BytePerPixelY[k];
SwathWidthYCriticalPlane = SwathWidthY[k];
LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[k] - (LinesInDETY - LinesInDETYRoundedDownToSwath);
MinTTUVBlankCriticalPlane = MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
dml_print("DML::%s: MinTTUVBlankCriticalPlane = %f\n", __func__, MinTTUVBlankCriticalPlane);
dml_print("DML::%s: FrameTimeCriticalPlane = %f\n", __func__, FrameTimeCriticalPlane);
dml_print("DML::%s: VActiveTimeCriticalPlane = %f\n", __func__, VActiveTimeCriticalPlane);
dml_print("DML::%s: BytePerPixelYCriticalPlane = %d\n", __func__, BytePerPixelYCriticalPlane);
dml_print("DML::%s: SwathWidthYCriticalPlane = %f\n", __func__, SwathWidthYCriticalPlane);
dml_print("DML::%s: LinesToFinishSwathTransferStutterCriticalPlane = %f\n", __func__, LinesToFinishSwathTransferStutterCriticalPlane);
#endif
}
}
PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = dml_min(*StutterPeriod * TotalDataReadBandwidth, EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ROBBufferSizeInKByte = %d\n", __func__, ROBBufferSizeInKByte);
dml_print("DML::%s: AverageDCCCompressionRate = %f\n", __func__, AverageDCCCompressionRate);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n", __func__, *StutterPeriod * TotalDataReadBandwidth);
dml_print("DML::%s: ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate + EffectiveCompressedBufferSize = %f\n", __func__, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate + EffectiveCompressedBufferSize);
dml_print("DML::%s: EffectiveCompressedBufferSize = %f\n", __func__, EffectiveCompressedBufferSize);
dml_print("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f\n", __func__, PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer);
dml_print("DML::%s: ReturnBW = %f\n", __func__, ReturnBW);
dml_print("DML::%s: TotalDataReadBandwidth = %f\n", __func__, TotalDataReadBandwidth);
dml_print("DML::%s: TotalRowReadBandwidth = %f\n", __func__, TotalRowReadBandwidth);
dml_print("DML::%s: DCFCLK = %f\n", __func__, DCFCLK);
#endif
StutterBurstTime = PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / AverageDCCCompressionRate / ReturnBW
+ (*StutterPeriod * TotalDataReadBandwidth - PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64)
+ *StutterPeriod * TotalRowReadBandwidth / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Part 1 = %f\n", __func__, PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / AverageDCCCompressionRate / ReturnBW);
dml_print("DML::%s: StutterPeriod * TotalDataReadBandwidth = %f\n", __func__, (*StutterPeriod * TotalDataReadBandwidth));
dml_print("DML::%s: Part 2 = %f\n", __func__, (*StutterPeriod * TotalDataReadBandwidth - PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (DCFCLK * 64));
dml_print("DML::%s: Part 3 = %f\n", __func__, *StutterPeriod * TotalRowReadBandwidth / ReturnBW);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
#endif
StutterBurstTime = dml_max(StutterBurstTime, LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
dml_print(
"DML::%s: Time to finish residue swath=%f\n",
__func__,
LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k]) {
TotalActiveWriteback = TotalActiveWriteback + 1;
}
}
if (TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: SRExitTime = %f\n", __func__, SRExitTime);
dml_print("DML::%s: SRExitZ8Time = %f\n", __func__, SRExitZ8Time);
dml_print("DML::%s: StutterBurstTime = %f (final)\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
#endif
*StutterEfficiencyNotIncludingVBlank = dml_max(0., 1 - (SRExitTime + StutterBurstTime) / *StutterPeriod) * 100;
*Z8StutterEfficiencyNotIncludingVBlank = dml_max(0., 1 - (SRExitZ8Time + StutterBurstTime) / *StutterPeriod) * 100;
*NumberOfStutterBurstsPerFrame = (*StutterEfficiencyNotIncludingVBlank > 0 ? dml_ceil(VActiveTimeCriticalPlane / *StutterPeriod, 1) : 0);
*Z8NumberOfStutterBurstsPerFrame = (*Z8StutterEfficiencyNotIncludingVBlank > 0 ? dml_ceil(VActiveTimeCriticalPlane / *StutterPeriod, 1) : 0);
} else {
*StutterEfficiencyNotIncludingVBlank = 0.;
*Z8StutterEfficiencyNotIncludingVBlank = 0.;
*NumberOfStutterBurstsPerFrame = 0;
*Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VActiveTimeCriticalPlane = %f\n", __func__, VActiveTimeCriticalPlane);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *Z8StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: NumberOfStutterBurstsPerFrame = %d\n", __func__, *NumberOfStutterBurstsPerFrame);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
#endif
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
TotalNumberOfActiveOTG = TotalNumberOfActiveOTG + 1;
}
}
if (*StutterEfficiencyNotIncludingVBlank > 0) {
LastStutterPeriod = VActiveTimeCriticalPlane - (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizedVBlank || TotalNumberOfActiveOTG == 1) && LastStutterPeriod + MinTTUVBlankCriticalPlane > StutterEnterPlusExitWatermark) {
*StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitTime + StutterBurstTime * VActiveTimeCriticalPlane
/ *StutterPeriod) / FrameTimeCriticalPlane) * 100;
} else {
*StutterEfficiency = *StutterEfficiencyNotIncludingVBlank;
}
} else {
*StutterEfficiency = 0;
}
if (*Z8StutterEfficiencyNotIncludingVBlank > 0) {
LastZ8StutterPeriod = VActiveTimeCriticalPlane - (*NumberOfStutterBurstsPerFrame - 1) * *StutterPeriod;
if ((SynchronizedVBlank || TotalNumberOfActiveOTG == 1) && LastZ8StutterPeriod + MinTTUVBlankCriticalPlane > Z8StutterEnterPlusExitWatermark) {
*Z8StutterEfficiency = (1 - (*NumberOfStutterBurstsPerFrame * SRExitZ8Time + StutterBurstTime * VActiveTimeCriticalPlane
/ *StutterPeriod) / FrameTimeCriticalPlane) * 100;
} else {
*Z8StutterEfficiency = *Z8StutterEfficiencyNotIncludingVBlank;
}
} else {
*Z8StutterEfficiency = 0.;
}
dml_print("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, Z8StutterEnterPlusExitWatermark);
dml_print("DML::%s: StutterBurstTime = %f\n", __func__, StutterBurstTime);
dml_print("DML::%s: StutterPeriod = %f\n", __func__, *StutterPeriod);
dml_print("DML::%s: StutterEfficiency = %f\n", __func__, *StutterEfficiency);
dml_print("DML::%s: Z8StutterEfficiency = %f\n", __func__, *Z8StutterEfficiency);
dml_print("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *StutterEfficiencyNotIncludingVBlank);
dml_print("DML::%s: Z8NumberOfStutterBurstsPerFrame = %d\n", __func__, *Z8NumberOfStutterBurstsPerFrame);
}
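/*
 * Pick per-plane swath heights so that the luma and chroma swaths fit in the
 * DET, and report whether the viewport configuration is supported.
 */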
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
enum source_format_class SourcePixelFormat[],
enum dm_swizzle_mode SurfaceTiling[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
enum odm_combine_mode ODMCombineEnabled[],
int BlendingAndTiming[],
int BytePerPixY[],
int BytePerPixC[],
double BytePerPixDETY[],
double BytePerPixDETC[],
int HActive[],
double HRatio[],
double HRatioChroma[],
int DPPPerPlane[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[],
double SwathWidth[],
double SwathWidthChroma[],
int SwathHeightY[],
int SwathHeightC[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
bool ViewportSizeSupportPerPlane[],
bool *ViewportSizeSupport)
{
int MaximumSwathHeightY[DC__NUM_DPP__MAX];
int MaximumSwathHeightC[DC__NUM_DPP__MAX];
int MinimumSwathHeightY;
int MinimumSwathHeightC;
int RoundedUpMaxSwathSizeBytesY;
int RoundedUpMaxSwathSizeBytesC;
int RoundedUpMinSwathSizeBytesY;
int RoundedUpMinSwathSizeBytesC;
int RoundedUpSwathSizeBytesY;
int RoundedUpSwathSizeBytesC;
double SwathWidthSingleDPP[DC__NUM_DPP__MAX];
double SwathWidthSingleDPPChroma[DC__NUM_DPP__MAX];
int k;
CalculateSwathWidth(
ForceSingleDPP,
NumberOfActivePlanes,
SourcePixelFormat,
SourceScan,
ViewportWidth,
ViewportHeight,
SurfaceWidthY,
SurfaceWidthC,
SurfaceHeightY,
SurfaceHeightC,
ODMCombineEnabled,
BytePerPixY,
BytePerPixC,
Read256BytesBlockHeightY,
Read256BytesBlockHeightC,
Read256BytesBlockWidthY,
Read256BytesBlockWidthC,
BlendingAndTiming,
HActive,
HRatio,
DPPPerPlane,
SwathWidthSingleDPP,
SwathWidthSingleDPPChroma,
SwathWidth,
SwathWidthChroma,
MaximumSwathHeightY,
MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear
|| (SourcePixelFormat[k] == dm_444_64
&& (SurfaceTiling[k] == dm_sw_64kb_s || SurfaceTiling[k] == dm_sw_64kb_s_t || SurfaceTiling[k] == dm_sw_64kb_s_x)
&& SourceScan[k] != dm_vert)) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else if (SourcePixelFormat[k] == dm_444_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
} else {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else {
if (SurfaceTiling[k] == dm_sw_linear) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k];
} else if (SourcePixelFormat[k] == dm_rgbe_alpha) {
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else if (SourcePixelFormat[k] == dm_420_8 && SourceScan[k] == dm_vert) {
MinimumSwathHeightY = MaximumSwathHeightY[k];
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
} else {
MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
}
}
RoundedUpMaxSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
RoundedUpMinSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k] * MinimumSwathHeightY;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesY = dml_ceil((double) RoundedUpMaxSwathSizeBytesY, 256);
RoundedUpMinSwathSizeBytesY = dml_ceil((double) RoundedUpMinSwathSizeBytesY, 256);
}
RoundedUpMaxSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
RoundedUpMinSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MinimumSwathHeightC;
if (SourcePixelFormat[k] == dm_420_10) {
RoundedUpMaxSwathSizeBytesC = dml_ceil(RoundedUpMaxSwathSizeBytesC, 256);
RoundedUpMinSwathSizeBytesC = dml_ceil(RoundedUpMinSwathSizeBytesC, 256);
}
if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY >= 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMinSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MaximumSwathHeightC[k];
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
} else if (RoundedUpMaxSwathSizeBytesY < 1.5 * RoundedUpMaxSwathSizeBytesC
&& RoundedUpMaxSwathSizeBytesY + RoundedUpMinSwathSizeBytesC <= DETBufferSizeInKByte * 1024 / 2) {
SwathHeightY[k] = MaximumSwathHeightY[k];
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
} else {
SwathHeightY[k] = MinimumSwathHeightY;
SwathHeightC[k] = MinimumSwathHeightC;
RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
}
{
double actDETBufferSizeInKByte = dml_ceil(DETBufferSizeInKByte, 64);
if (SwathHeightC[k] == 0) {
DETBufferSizeY[k] = actDETBufferSizeInKByte * 1024;
DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
DETBufferSizeY[k] = actDETBufferSizeInKByte * 1024 / 2;
DETBufferSizeC[k] = actDETBufferSizeInKByte * 1024 / 2;
} else {
DETBufferSizeY[k] = dml_floor(actDETBufferSizeInKByte * 1024 * 2 / 3, 1024);
DETBufferSizeC[k] = actDETBufferSizeInKByte * 1024 / 3;
}
if (RoundedUpMinSwathSizeBytesY + RoundedUpMinSwathSizeBytesC > actDETBufferSizeInKByte * 1024 / 2 || SwathWidth[k] > MaximumSwathWidthLuma[k]
|| (SwathHeightC[k] > 0 && SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
ViewportSizeSupportPerPlane[k] = false;
} else {
ViewportSizeSupportPerPlane[k] = true;
}
}
}
}
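/*
 * Derive per-plane swath widths: viewport width (or height for vertical scan)
 * for a single DPP, reduced for ODM combine or a 2-DPP split, plus the chroma
 * widths, the request-block-aligned (_ub) widths and the maximum swath
 * heights implied by the 256-byte request block dimensions.
 */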
static void CalculateSwathWidth(
bool ForceSingleDPP,
int NumberOfActivePlanes,
enum source_format_class SourcePixelFormat[],
enum scan_direction_class SourceScan[],
int ViewportWidth[],
int ViewportHeight[],
int SurfaceWidthY[],
int SurfaceWidthC[],
int SurfaceHeightY[],
int SurfaceHeightC[],
enum odm_combine_mode ODMCombineEnabled[],
int BytePerPixY[],
int BytePerPixC[],
int Read256BytesBlockHeightY[],
int Read256BytesBlockHeightC[],
int Read256BytesBlockWidthY[],
int Read256BytesBlockWidthC[],
int BlendingAndTiming[],
int HActive[],
double HRatio[],
int DPPPerPlane[],
double SwathWidthSingleDPPY[],
double SwathWidthSingleDPPC[],
double SwathWidthY[],
double SwathWidthC[],
int MaximumSwathHeightY[],
int MaximumSwathHeightC[],
int swath_width_luma_ub[],
int swath_width_chroma_ub[])
{
enum odm_combine_mode MainPlaneODMCombine;
int j, k;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: NumberOfActivePlanes = %d\n", __func__, NumberOfActivePlanes);
#endif
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
} else {
SwathWidthSingleDPPY[k] = ViewportHeight[k];
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d ViewportWidth=%d\n", __func__, k, ViewportWidth[k]);
dml_print("DML::%s: k=%d ViewportHeight=%d\n", __func__, k, ViewportHeight[k]);
#endif
MainPlaneODMCombine = ODMCombineEnabled[k];
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
MainPlaneODMCombine = ODMCombineEnabled[j];
}
}
if (MainPlaneODMCombine == dm_odm_combine_mode_4to1)
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 4.0 * HRatio[k]));
else if (MainPlaneODMCombine == dm_odm_combine_mode_2to1)
SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 2.0 * HRatio[k]));
else if (DPPPerPlane[k] == 2)
SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
else
SwathWidthY[k] = SwathWidthSingleDPPY[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d SwathWidthSingleDPPY=%f\n", __func__, k, SwathWidthSingleDPPY[k]);
dml_print("DML::%s: k=%d SwathWidthY=%f\n", __func__, k, SwathWidthY[k]);
#endif
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 || SourcePixelFormat[k] == dm_420_12) {
SwathWidthC[k] = SwathWidthY[k] / 2;
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
} else {
SwathWidthC[k] = SwathWidthY[k];
SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
}
if (ForceSingleDPP == true) {
SwathWidthY[k] = SwathWidthSingleDPPY[k];
SwathWidthC[k] = SwathWidthSingleDPPC[k];
}
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
#endif
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
} else {
MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
swath_width_chroma_ub[k] = 0;
}
}
}
}
}
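/*
 * Extra latency = round-trip ping cycles (plus the arbiter-to-return delay)
 * at DCFCLK, plus the time to return the extra outstanding bytes at ReturnBW.
 */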
static double CalculateExtraLatency(
int RoundTripPingLatencyCycles,
int ReorderingBytes,
double DCFCLK,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
double ReturnBW,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ExtraLatencyBytes;
double ExtraLatency;
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
TotalNumberOfActiveDPP,
PixelChunkSizeInKByte,
TotalNumberOfDCCActiveDPP,
MetaChunkSize,
GPUVMEnable,
HostVMEnable,
NumberOfActivePlanes,
NumberOfDPP,
dpte_group_bytes,
HostVMInefficiencyFactor,
HostVMMinPageSize,
HostVMMaxNonCachedPageTableLevels);
ExtraLatency = (RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__) / DCFCLK + ExtraLatencyBytes / ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: RoundTripPingLatencyCycles=%d\n", __func__, RoundTripPingLatencyCycles);
dml_print("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml_print("DML::%s: ExtraLatencyBytes=%f\n", __func__, ExtraLatencyBytes);
dml_print("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml_print("DML::%s: ExtraLatency=%f\n", __func__, ExtraLatency);
#endif
return ExtraLatency;
}
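/*
 * Outstanding-return overhead in bytes: reordering buffer, one pixel chunk
 * per active DPP, one meta chunk per DCC-active DPP, and, with GPUVM enabled,
 * the per-plane dpte group bytes scaled by the host-VM page-table depth and
 * inefficiency factor.
 */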
static double CalculateExtraLatencyBytes(
int ReorderingBytes,
int TotalNumberOfActiveDPP,
int PixelChunkSizeInKByte,
int TotalNumberOfDCCActiveDPP,
int MetaChunkSize,
bool GPUVMEnable,
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
int dpte_group_bytes[],
double HostVMInefficiencyFactor,
double HostVMMinPageSize,
int HostVMMaxNonCachedPageTableLevels)
{
double ret;
int HostVMDynamicLevels = 0, k;
if (GPUVMEnable == true && HostVMEnable == true) {
if (HostVMMinPageSize < 2048)
HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576)
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
} else {
HostVMDynamicLevels = 0;
}
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
return ret;
}
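/*
 * Urgent latency is the worst of the pixel-only, mixed and VM-only
 * components, optionally increased when the fabric clock runs below its
 * reference.
 */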
static double CalculateUrgentLatency(
double UrgentLatencyPixelDataOnly,
double UrgentLatencyPixelMixedWithVMData,
double UrgentLatencyVMDataOnly,
bool DoUrgentLatencyAdjustment,
double UrgentLatencyAdjustmentFabricClockComponent,
double UrgentLatencyAdjustmentFabricClockReference,
double FabricClock)
{
double ret;
ret = dml_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
if (DoUrgentLatencyAdjustment == true)
ret = ret + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
return ret;
}
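/*
 * For each voltage state and DPP configuration, lower DCFCLKState to the
 * minimum clock that still meets average bandwidth and per-plane prefetch
 * (peak) bandwidth, including dynamic-metadata deadlines, with a 5% margin.
 */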
static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes)
{
struct vba_vars_st *v = &mode_lib->vba;
int dummy1, i, j, k;
double NormalEfficiency, dummy2, dummy3;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2];
NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX];
double PrefetchPixelLinesTime[DC__NUM_DPP__MAX];
double DCFCLKRequiredForPeakBandwidthPerPlane[DC__NUM_DPP__MAX];
double DynamicMetadataVMExtraLatency[DC__NUM_DPP__MAX];
double MinimumTWait;
double NonDPTEBandwidth;
double DPTEBandwidth;
double DCFCLKRequiredForAverageBandwidth;
double ExtraLatencyBytes;
double ExtraLatencyCycles;
double DCFCLKRequiredForPeakBandwidth;
int NoOfDPPState[DC__NUM_DPP__MAX];
double MinimumTvmPlus2Tr0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
+ v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k)
NoOfDPPState[k] = v->NoOfDPP[i][j][k];
MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max3(
v->ProjectedDCFCLKDeepSleep[i][j],
(NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth
/ (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
(NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth);
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
v->TotalNumberOfActiveDPP[i][j],
v->PixelChunkSizeInKByte,
v->TotalNumberOfDCCActiveDPP[i][j],
v->MetaChunkSize,
v->GPUVMEnable,
v->HostVMEnable,
v->NumberOfActivePlanes,
NoOfDPPState,
v->dpte_group_bytes,
1,
v->HostVMMinPageSize,
v->HostVMMaxNonCachedPageTableLevels);
ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch;
double ExpectedPrefetchBWAcceleration;
double PrefetchTime;
PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k]
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0)
+ 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth
+ 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k])
/ (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
DynamicMetadataVMExtraLatency[k] =
(v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait
- v->UrgLatency[i]
* ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2)
* (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
- DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch;
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k]
/ (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
+ NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
if (v->DynamicMetadataEnable[k] == true) {
double TSetupPipe;
double TdmbfPipe;
double TdmsksPipe;
double TdmecPipe;
double AllowedTimeForUrgentExtraLatency;
CalculateVupdateAndDynamicMetadataParameters(
v->MaxInterDCNTileRepeaters,
v->RequiredDPPCLK[i][j][k],
v->RequiredDISPCLK[i][j],
v->ProjectedDCFCLKDeepSleep[i][j],
v->PixelClock[k],
v->HTotal[k],
v->VTotal[k] - v->VActive[k],
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->Interlace[k],
v->ProgressiveToInterlaceUnitInOPP,
&TSetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe,
&dummy1,
&dummy2,
&dummy3);
AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
- TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(
DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k)
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
MinimumTvmPlus2Tr0 = v->UrgLatency[i]
* (v->GPUVMEnable == true ?
(v->HostVMEnable == true ?
(v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) :
0);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw;
MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(
DCFCLKRequiredForPeakBandwidth,
2 * ExtraLatencyCycles / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
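/*
 * Decide whether unbounded requesting is allowed (policy permitting, a single
 * active DPP, no chroma planes, and eDP output when the policy is eDP-only)
 * and size the compressed buffer from the return-buffer space left after the
 * DET allocation.
 */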
static void CalculateUnboundedRequestAndCompressedBufferSize(
unsigned int DETBufferSizeInKByte,
int ConfigReturnBufferSizeInKByte,
enum unbounded_requesting_policy UseUnboundedRequestingFinal,
int TotalActiveDPP,
bool NoChromaPlanes,
int MaxNumDPP,
int CompressedBufferSegmentSizeInkByteFinal,
enum output_encoder_class *Output,
bool *UnboundedRequestEnabled,
int *CompressedBufferSizeInkByte)
{
double actDETBufferSizeInKByte = dml_ceil(DETBufferSizeInKByte, 64);
*UnboundedRequestEnabled = UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaPlanes, Output[0]);
*CompressedBufferSizeInkByte = (
*UnboundedRequestEnabled == true ?
ConfigReturnBufferSizeInKByte - TotalActiveDPP * actDETBufferSizeInKByte :
ConfigReturnBufferSizeInKByte - MaxNumDPP * actDETBufferSizeInKByte);
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByteFinal / 64;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: DETBufferSizeInKByte = %d\n", __func__, DETBufferSizeInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
dml_print("DML::%s: actDETBufferSizeInKByte = %f\n", __func__, actDETBufferSizeInKByte);
dml_print("DML::%s: UnboundedRequestEnabled = %d\n", __func__, *UnboundedRequestEnabled);
dml_print("DML::%s: CompressedBufferSizeInkByte = %d\n", __func__, *CompressedBufferSizeInkByte);
#endif
}
static bool UnboundedRequest(enum unbounded_requesting_policy UseUnboundedRequestingFinal, int TotalNumberOfActiveDPP, bool NoChroma, enum output_encoder_class Output)
{
bool ret_val = false;
ret_val = (UseUnboundedRequestingFinal != dm_unbounded_requesting_disable && TotalNumberOfActiveDPP == 1 && NoChroma);
if (UseUnboundedRequestingFinal == dm_unbounded_requesting_edp_only && Output != dm_edp)
ret_val = false;
return ret_val;
}
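/*
 * Maximum VSTARTUP in lines: the usable vblank (actual vs. nominal/default)
 * minus the writeback delay, halved for interlaced timing without the
 * progressive-to-interlace unit, and clamped to the 10-bit limit of 1023.
 */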
static unsigned int CalculateMaxVStartup(
unsigned int VTotal,
unsigned int VActive,
unsigned int VBlankNom,
unsigned int HTotal,
double PixelClock,
bool ProgressiveTointerlaceUnitinOPP,
bool Interlace,
unsigned int VBlankNomDefaultUS,
double WritebackDelayTime)
{
unsigned int MaxVStartup = 0;
unsigned int vblank_size = 0;
double line_time_us = HTotal / PixelClock;
unsigned int vblank_actual = VTotal - VActive;
unsigned int vblank_nom_default_in_line = dml_floor(VBlankNomDefaultUS / line_time_us, 1.0);
unsigned int vblank_nom_input = VBlankNom; //dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_avail = vblank_nom_input == 0 ? vblank_nom_default_in_line : vblank_nom_input;
vblank_size = (unsigned int) dml_min(vblank_actual, vblank_avail);
if (Interlace && !ProgressiveTointerlaceUnitinOPP)
MaxVStartup = dml_floor(vblank_size / 2.0, 1.0);
else
MaxVStartup = vblank_size - dml_max(1.0, dml_ceil(WritebackDelayTime / line_time_us, 1.0));
if (MaxVStartup > 1023)
MaxVStartup = 1023;
return MaxVStartup;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "rc_calc_fpu.h"
#include "qp_tables.h"
#include "amdgpu_dm/dc_fpu.h"
#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
#define MODE_SELECT(val444, val422, val420) \
(cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
table = qp_table_##mode##_##bpc##bpc_##max; \
table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
break
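/* Return the middle of three values (used to clamp the bpg offsets). */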
static int median3(int a, int b, int c)
{
if (a > b)
swap(a, b);
if (b > c)
swap(b, c);
if (a > b)
swap(a, b);
return b;
}
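/* Round to the nearest integer, with halves rounded away from zero. */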
static double dsc_roundf(double num)
{
if (num < 0.0)
num = num - 0.5;
else
num = num + 0.5;
return (int)(num);
}
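/*
 * Copy the QP row for the requested bpp out of the table selected by colour
 * mode, bpc and min/max. The index scaling by 2 assumes table rows are spaced
 * 0.5 bpp apart.
 */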
static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
enum max_min max_min, float bpp)
{
int mode = MODE_SELECT(444, 422, 420);
int sel = table_hash(mode, bpc, max_min);
int table_size = 0;
int index;
const struct qp_entry *table = NULL;
// alias enum
enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
switch (sel) {
TABLE_CASE(444, 8, max);
TABLE_CASE(444, 8, min);
TABLE_CASE(444, 10, max);
TABLE_CASE(444, 10, min);
TABLE_CASE(444, 12, max);
TABLE_CASE(444, 12, min);
TABLE_CASE(422, 8, max);
TABLE_CASE(422, 8, min);
TABLE_CASE(422, 10, max);
TABLE_CASE(422, 10, min);
TABLE_CASE(422, 12, max);
TABLE_CASE(422, 12, min);
TABLE_CASE(420, 8, max);
TABLE_CASE(420, 8, min);
TABLE_CASE(420, 10, max);
TABLE_CASE(420, 10, min);
TABLE_CASE(420, 12, max);
TABLE_CASE(420, 12, min);
}
if (!table)
return;
index = (bpp - table[0].bpp) * 2;
/* requested size is bigger than the table */
if (index >= table_size) {
dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
return;
}
memcpy(qps, table[index].qps, sizeof(qp_set));
}
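/*
 * Fill the RC target offset set; each of the 15 entries is a piecewise-linear
 * function of bpp with separate breakpoints for 4:4:4/RGB, 4:2:2 and 4:2:0.
 */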
static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
{
int *p = ofs;
if (mode == CM_444 || mode == CM_RGB) {
*p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
*p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
*p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
*p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
*p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
*p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
*p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
*p++ = -10;
*p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
*p++ = -12;
*p++ = -12;
*p++ = -12;
} else if (mode == CM_422) {
*p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
*p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
*p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
*p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
*p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
*p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
*p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
*p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
*p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
*p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
*p++ = -10;
*p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
*p++ = -12;
*p++ = -12;
*p++ = -12;
} else {
*p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
*p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
*p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
*p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
*p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
*p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
*p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
*p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
*p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
*p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
*p++ = -10;
*p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
*p++ = -12;
*p++ = -12;
*p++ = -12;
}
}
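/*
 * Compute the DSC rate-control parameter set (quant increment limits, initial
 * delay/fullness offsets, flatness QPs, per-range QP min/max and offsets, and
 * the fixed model size and buffer thresholds) for the given colour mode, bpc
 * and target bpp. Requires FPU access to be enabled by the caller.
 */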
void _do_calc_rc_params(struct rc_params *rc,
enum colour_mode cm,
enum bits_per_comp bpc,
u16 drm_bpp,
bool is_navite_422_or_420,
int slice_width,
int slice_height,
int minor_version)
{
float bpp;
float bpp_group;
float initial_xmit_delay_factor;
int padding_pixels;
int i;
dc_assert_fp_enabled();
bpp = ((float)drm_bpp / 16.0);
/* in native_422 or native_420 modes, the bits_per_pixel is double the
* target bpp (the latter is what calc_rc_params expects)
*/
if (is_navite_422_or_420)
bpp /= 2.0;
rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
switch (cm) {
case CM_420:
rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
break;
case CM_422:
rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
rc->second_line_bpg_offset = 0;
break;
case CM_444:
case CM_RGB:
rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
rc->second_line_bpg_offset = 0;
break;
}
initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
if (cm == CM_422 || cm == CM_420)
slice_width /= 2;
padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
rc->initial_xmit_delay++;
}
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
if (cm == CM_444 && minor_version == 1) {
for (i = 0; i < QP_SET_SIZE; ++i) {
rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
}
}
get_ofs_set(rc->ofs, cm, bpp);
/* fixed parameters */
rc->rc_model_size = 8192;
rc->rc_edge_factor = 6;
rc->rc_tgt_offset_hi = 3;
rc->rc_tgt_offset_lo = 3;
rc->rc_buf_thresh[0] = 896;
rc->rc_buf_thresh[1] = 1792;
rc->rc_buf_thresh[2] = 2688;
rc->rc_buf_thresh[3] = 3584;
rc->rc_buf_thresh[4] = 4480;
rc->rc_buf_thresh[5] = 5376;
rc->rc_buf_thresh[6] = 6272;
rc->rc_buf_thresh[7] = 6720;
rc->rc_buf_thresh[8] = 7168;
rc->rc_buf_thresh[9] = 7616;
rc->rc_buf_thresh[10] = 7744;
rc->rc_buf_thresh[11] = 7872;
rc->rc_buf_thresh[12] = 8000;
rc->rc_buf_thresh[13] = 8064;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/irq_service_interface.h"
#include "include/logger_interface.h"
#include "dce110/irq_service_dce110.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/irq_service_dce60.h"
#endif
#include "dce80/irq_service_dce80.h"
#include "dce120/irq_service_dce120.h"
#include "dcn10/irq_service_dcn10.h"
#include "reg_helper.h"
#include "irq_service.h"
#define CTX \
irq_service->ctx
#define DC_LOGGER \
irq_service->ctx->logger
void dal_irq_service_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
if (!init_data || !init_data->ctx) {
BREAK_TO_DEBUGGER();
return;
}
irq_service->ctx = init_data->ctx;
}
void dal_irq_service_destroy(struct irq_service **irq_service)
{
if (!irq_service || !*irq_service) {
BREAK_TO_DEBUGGER();
return;
}
kfree(*irq_service);
*irq_service = NULL;
}
static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
return NULL;
return &irq_service->info[source];
}
void dal_irq_service_set_generic(
struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
uint32_t addr = info->enable_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
value = (value & ~info->enable_mask) |
(info->enable_value[enable ? 0 : 1] & info->enable_mask);
dm_write_reg(irq_service->ctx, addr, value);
}
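/*
 * Enable or disable an interrupt source: ack any pending state first, then
 * use the source's own ->set hook if it has one, otherwise program the
 * generic enable register/mask.
 */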
bool dal_irq_service_set(
struct irq_service *irq_service,
enum dc_irq_source source,
bool enable)
{
const struct irq_source_info *info =
find_irq_source_info(irq_service, source);
if (!info) {
DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
__func__,
source);
return false;
}
dal_irq_service_ack(irq_service, source);
if (info->funcs && info->funcs->set) {
if (info->funcs->set == dal_irq_service_dummy_set) {
DC_LOG_WARNING("%s: src: %d, st: %d\n", __func__,
source, enable);
ASSERT(0);
}
return info->funcs->set(irq_service, info, enable);
}
dal_irq_service_set_generic(irq_service, info, enable);
return true;
}
void dal_irq_service_ack_generic(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->ack_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
value = (value & ~info->ack_mask) |
(info->ack_value & info->ack_mask);
dm_write_reg(irq_service->ctx, addr, value);
}
bool dal_irq_service_ack(
struct irq_service *irq_service,
enum dc_irq_source source)
{
const struct irq_source_info *info =
find_irq_source_info(irq_service, source);
if (!info) {
DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
__func__,
source);
return false;
}
if (info->funcs && info->funcs->ack) {
if (info->funcs->ack == dal_irq_service_dummy_ack) {
DC_LOG_WARNING("%s: src: %d\n", __func__, source);
ASSERT(0);
}
return info->funcs->ack(irq_service, info);
}
dal_irq_service_ack_generic(irq_service, info);
return true;
}
enum dc_irq_source dal_irq_service_to_irq_source(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
return irq_service->funcs->to_dal_irq_source(
irq_service,
src_id,
ext_id);
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/irq_service.c |
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Authors: AMD
*/
#include "dm_services.h"
#include "irq_service_dcn303.h"
#include "../dce110/irq_service_dce110.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_3_offset.h"
#include "dcn/dcn_3_0_3_sh_mask.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
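/*
 * Ack the HPD interrupt and flip the sense polarity so the opposite
 * (connect/disconnect) transition raises the next interrupt.
 */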
static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() { .funcs = &dummy_irq_info_funcs }
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info irq_source_info_dcn303[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
i2c_int_entry(1),
i2c_int_entry(2),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vblank_int_entry(0),
vblank_int_entry(1),
vline0_int_entry(0),
vline0_int_entry(1),
};
static const struct irq_service_funcs irq_service_funcs_dcn303 = {
.to_dal_irq_source = to_dal_irq_source_dcn303
};
static void dcn303_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn303;
irq_service->funcs = &irq_service_funcs_dcn303;
}
struct irq_service *dal_irq_service_dcn303_create(struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service), GFP_KERNEL);
if (!irq_service)
return NULL;
dcn303_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "irq_service_dce80.h"
#include "../dce110/irq_service_dce110.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "dc_types.h"
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
DC_HPD1_INT_STATUS,
DC_HPD1_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
DC_HPD1_INT_CONTROL,
DC_HPD1_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = dce110_vblank_set,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
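/*
 * The *_int_entry() macros below fill irq_source_info_dce80[] with the
 * enable/ack register, mask and value programming for each interrupt source;
 * sources DC does not handle get dummy entries.
 */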
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
.enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
},\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
.ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
.status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD6 + reg_num] = {\
.enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
.ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
.status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
.enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
.enable_mask =\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
.enable_value = {\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
.ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
.enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
.enable_mask =\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
.enable_value = {\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
.ack_mask =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
.enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.enable_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
.enable_value = {\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.ack_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
.ack_value =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
.funcs = &vblank_irq_info_funcs,\
.src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dce80[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_int_entry(6),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
hpd_rx_int_entry(6),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_int_entry(0),
vupdate_int_entry(1),
vupdate_int_entry(2),
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dce80 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
static void dce80_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dce80;
irq_service->funcs = &irq_service_funcs_dce80;
}
struct irq_service *dal_irq_service_dce80_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "irq_service_dcn302.h"
#include "../dce110/irq_service_dce110.h"
#include "dimgrey_cavefish_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
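/* Acknowledge the HPD interrupt, then invert DC_HPD_INT_POLARITY based on the
* delayed sense bit so the opposite transition (connect vs. disconnect) raises
* the next interrupt.
*/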
static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI_DMUB(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
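/* IRQ_REG_ENTRY fills one irq_source_info's enable/ack register addresses
* (via SRI) plus masks and values by token-pasting the
* <block><num>_<reg>__<field>_MASK definitions from the register headers.
*/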
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define dmub_trace_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
.funcs = &dmub_trace_irq_info_funcs\
}
#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() { .funcs = &dummy_irq_info_funcs }
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info irq_source_info_dcn302[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
dmub_trace_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn302 = {
.to_dal_irq_source = to_dal_irq_source_dcn302
};
static void dcn302_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn302;
irq_service->funcs = &irq_service_funcs_dcn302;
}
struct irq_service *dal_irq_service_dcn302_create(struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service), GFP_KERNEL);
if (!irq_service)
return NULL;
dcn302_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "yellow_carp_offset.h"
#include "dcn/dcn_3_1_2_offset.h"
#include "dcn/dcn_3_1_2_sh_mask.h"
#include "irq_service_dcn31.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs outbox_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRI_DMUB(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
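/* Ready interrupt for DMCUB outbox1, the mailbox the DMCUB firmware uses to
* post notifications back to the driver.
*/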
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
.funcs = &outbox_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn31[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
[DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
[DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
dmub_outbox_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn31 = {
.to_dal_irq_source = to_dal_irq_source_dcn31
};
static void dcn31_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn31;
irq_service->funcs = &irq_service_funcs_dcn31;
}
struct irq_service *dal_irq_service_dcn31_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn31_irq_construct(irq_service, init_data);
return irq_service;
}
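/* Illustrative usage sketch: the display manager creates one irq_service per
* ASIC and uses it to translate IH cookies. The init_data.ctx field name is
* an assumption based on irq_service_interface.h.
*
*	struct irq_service_init_data init_data = { .ctx = dc->ctx };
*	struct irq_service *irqs = dal_irq_service_dcn31_create(&init_data);
*	enum dc_irq_source src =
*		irqs->funcs->to_dal_irq_source(irqs, src_id, ext_id);
*/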
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "irq_service_dcn30.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn30(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI_DMUB(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define dmub_trace_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
.funcs = &dmub_trace_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
dmub_trace_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn30 = {
.to_dal_irq_source = to_dal_irq_source_dcn30
};
static void dcn30_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn30;
irq_service->funcs = &irq_service_funcs_dcn30;
}
struct irq_service *dal_irq_service_dcn30_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "irq_service_dcn201.h"
#include "dcn/dcn_2_0_3_offset.h"
#include "dcn/dcn_2_0_3_sh_mask.h"
#include "cyan_skillfish_ip_offset.h"
#include "soc15_hw_ip.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn201(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn201[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
i2c_int_entry(1),
i2c_int_entry(2),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
vblank_int_entry(0),
vblank_int_entry(1),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
vline0_int_entry(0),
vline0_int_entry(1),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
dummy_irq_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn201 = {
.to_dal_irq_source = to_dal_irq_source_dcn201
};
static void dcn201_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn201;
irq_service->funcs = &irq_service_funcs_dcn201;
}
struct irq_service *dal_irq_service_dcn201_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn201_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c |
/*
* Copyright 2020 Mauro Rossi <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "include/logger_interface.h"
#include "irq_service_dce60.h"
#include "../dce110/irq_service_dce110.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#define VISLANDS30_IV_SRCID_D1_VBLANK 1
#define VISLANDS30_IV_SRCID_D2_VBLANK 2
#define VISLANDS30_IV_SRCID_D3_VBLANK 3
#define VISLANDS30_IV_SRCID_D4_VBLANK 4
#define VISLANDS30_IV_SRCID_D5_VBLANK 5
#define VISLANDS30_IV_SRCID_D6_VBLANK 6
#include "dc_types.h"
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
DC_HPD1_INT_STATUS,
DC_HPD1_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
DC_HPD1_INT_CONTROL,
DC_HPD1_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = dce110_vblank_set,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs_dce60 = {
.set = NULL,
.ack = NULL
};
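/* DCE6 HPD table entries below are generated with a 1-based reg_num, so
* DC_IRQ_SOURCE_INVALID + reg_num resolves to DC_IRQ_SOURCE_HPD1..HPD6.
*/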
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
.enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
},\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
.ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
.status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD6 + reg_num] = {\
.enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
.enable_value = {\
DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
.ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
.ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
.ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
.status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
.enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
.enable_mask =\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
.enable_value = {\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
.ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
.enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
.enable_mask =\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
.enable_value = {\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
.ack_mask =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.funcs = &vblank_irq_info_funcs\
}
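/* DCE6 vblank enable/ack live in the per-pipe line buffer (LB) registers,
* unlike the DCE11 service further down, which programs
* CRTC_VERTICAL_INTERRUPT0_CONTROL.
*/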
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
.enable_reg = mmLB ## reg_num ## _INT_MASK,\
.enable_mask =\
INT_MASK__VBLANK_INT_MASK,\
.enable_value = {\
INT_MASK__VBLANK_INT_MASK,\
~INT_MASK__VBLANK_INT_MASK},\
.ack_reg = mmLB ## reg_num ## _VBLANK_STATUS,\
.ack_mask =\
VBLANK_STATUS__VBLANK_ACK_MASK,\
.ack_value =\
VBLANK_STATUS__VBLANK_ACK_MASK,\
.funcs = &vblank_irq_info_funcs_dce60\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dce60[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_int_entry(6),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
hpd_rx_int_entry(6),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_int_entry(0),
vupdate_int_entry(1),
vupdate_int_entry(2),
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
};
enum dc_irq_source to_dal_irq_source_dce60(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case VISLANDS30_IV_SRCID_D1_VBLANK:
return DC_IRQ_SOURCE_VBLANK1;
case VISLANDS30_IV_SRCID_D2_VBLANK:
return DC_IRQ_SOURCE_VBLANK2;
case VISLANDS30_IV_SRCID_D3_VBLANK:
return DC_IRQ_SOURCE_VBLANK3;
case VISLANDS30_IV_SRCID_D4_VBLANK:
return DC_IRQ_SOURCE_VBLANK4;
case VISLANDS30_IV_SRCID_D5_VBLANK:
return DC_IRQ_SOURCE_VBLANK5;
case VISLANDS30_IV_SRCID_D6_VBLANK:
return DC_IRQ_SOURCE_VBLANK6;
case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE1;
case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE2;
case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE3;
case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE4;
case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE5;
case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE6;
case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP1;
case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP2;
case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP3;
case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP4;
case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP5;
case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP6;
case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
return DC_IRQ_SOURCE_HPD1;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
return DC_IRQ_SOURCE_HPD2;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
return DC_IRQ_SOURCE_HPD3;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
return DC_IRQ_SOURCE_HPD4;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
return DC_IRQ_SOURCE_HPD5;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
return DC_IRQ_SOURCE_HPD6;
case VISLANDS30_IV_EXTID_HPD_RX_A:
return DC_IRQ_SOURCE_HPD1RX;
case VISLANDS30_IV_EXTID_HPD_RX_B:
return DC_IRQ_SOURCE_HPD2RX;
case VISLANDS30_IV_EXTID_HPD_RX_C:
return DC_IRQ_SOURCE_HPD3RX;
case VISLANDS30_IV_EXTID_HPD_RX_D:
return DC_IRQ_SOURCE_HPD4RX;
case VISLANDS30_IV_EXTID_HPD_RX_E:
return DC_IRQ_SOURCE_HPD5RX;
case VISLANDS30_IV_EXTID_HPD_RX_F:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static const struct irq_service_funcs irq_service_funcs_dce60 = {
.to_dal_irq_source = to_dal_irq_source_dce60
};
static void dce60_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dce60;
irq_service->funcs = &irq_service_funcs_dce60;
}
struct irq_service *dal_irq_service_dce60_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "irq_service_dce110.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "dc.h"
#include "core_types.h"
#define DC_LOGGER \
irq_service->ctx->logger
static bool hpd_ack(struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status = get_reg_field_value(value,
DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(value, current_status ? 0 : 1,
DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = dce110_vblank_set,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
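/*
 * The *_int_entry() macros below fill irq_source_info_dce110[] slots indexed
 * by DC_IRQ_SOURCE_*, pasting the register instance number into the
 * mmHPD/mmDCP/mmCRTC register names to derive each source's enable, ack and
 * status registers.
 */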
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
.enable_value = {\
DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
},\
.ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
.ack_value = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
.status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
.enable_value = {\
DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
.ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
.ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
.ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
.status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
.enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
.enable_mask =\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
.enable_value = {\
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
.ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
.ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
		.status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
.enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
.enable_mask =\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
.enable_value = {\
CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
.ack_mask =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
.enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.enable_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
.enable_value = {\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
.ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
.ack_mask =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
.ack_value =\
CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
.funcs = &vblank_irq_info_funcs,\
.src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
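/*
 * Fallback handlers referenced by dummy_irq_entry(): sources without
 * programmable enable/ack registers just log an error and report failure.
 */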
bool dal_irq_service_dummy_set(struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
__func__, info->src_id, info->ext_id);
return false;
}
bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
const struct irq_source_info *info)
{
DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
__func__, info->src_id, info->ext_id);
return false;
}
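/*
 * Enabling VBLANK needs more than a register write: translate the request
 * back to a DAL irq source to find the pipe, arm the timing generator's
 * vertical interrupt via arm_vert_intr(tg, 2) when enabling, then fall
 * through to the generic enable/disable. A missing or un-armable TG is
 * reported as an error.
 */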
bool dce110_vblank_set(struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
struct dc_context *dc_ctx = irq_service->ctx;
struct dc *dc = irq_service->ctx->dc;
enum dc_irq_source dal_irq_src =
dc_interrupt_to_irq_source(irq_service->ctx->dc,
info->src_id,
info->ext_id);
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
struct timing_generator *tg =
dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
DC_ERROR("Failed to get VBLANK!\n");
return false;
}
}
dal_irq_service_set_generic(irq_service, info, enable);
return true;
}
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dce110[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_int_entry(0),
vupdate_int_entry(1),
vupdate_int_entry(2),
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
};
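/*
 * Translate an interrupt-handler source id (plus extension id) into a DAL
 * irq source. HPD and HPD-RX share the generic HOTPLUG_DETECT_A src_id and
 * are told apart by ext_id.
 */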
enum dc_irq_source to_dal_irq_source_dce110(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK1;
case VISLANDS30_IV_SRCID_D2_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK2;
case VISLANDS30_IV_SRCID_D3_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK3;
case VISLANDS30_IV_SRCID_D4_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK4;
case VISLANDS30_IV_SRCID_D5_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK5;
case VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0:
return DC_IRQ_SOURCE_VBLANK6;
case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE1;
case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE2;
case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE3;
case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE4;
case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE5;
case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
return DC_IRQ_SOURCE_VUPDATE6;
case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP1;
case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP2;
case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP3;
case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP4;
case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP5;
case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
return DC_IRQ_SOURCE_PFLIP6;
case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
return DC_IRQ_SOURCE_HPD1;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
return DC_IRQ_SOURCE_HPD2;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
return DC_IRQ_SOURCE_HPD3;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
return DC_IRQ_SOURCE_HPD4;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
return DC_IRQ_SOURCE_HPD5;
case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
return DC_IRQ_SOURCE_HPD6;
case VISLANDS30_IV_EXTID_HPD_RX_A:
return DC_IRQ_SOURCE_HPD1RX;
case VISLANDS30_IV_EXTID_HPD_RX_B:
return DC_IRQ_SOURCE_HPD2RX;
case VISLANDS30_IV_EXTID_HPD_RX_C:
return DC_IRQ_SOURCE_HPD3RX;
case VISLANDS30_IV_EXTID_HPD_RX_D:
return DC_IRQ_SOURCE_HPD4RX;
case VISLANDS30_IV_EXTID_HPD_RX_E:
return DC_IRQ_SOURCE_HPD5RX;
case VISLANDS30_IV_EXTID_HPD_RX_F:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static const struct irq_service_funcs irq_service_funcs_dce110 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
static void dce110_irq_construct(struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dce110;
irq_service->funcs = &irq_service_funcs_dce110;
}
struct irq_service *
dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dce110_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "irq_service_dce120.h"
#include "../dce110/irq_service_dce110.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "ivsrcid/ivsrcid_vislands30.h"
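/* Same HPD ack sequence as DCE11, but the DCE12 headers prefix the register
 * field names with the HPD0_ instance (HPD0_DC_HPD_INT_STATUS/CONTROL).
 */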
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = dce110_vblank_set,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
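/*
 * BASE()/SRI() compose absolute register addresses for soc15-style parts:
 * the per-instance base segment (DCE_BASE__INST0_SEGn) plus the
 * mm<block><id> register offset, resolved at compile time by token pasting.
 */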
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(DCP, reg_num, \
GRPH_INTERRUPT_CONTROL, GRPH_PFLIP_INT_MASK, \
GRPH_INTERRUPT_STATUS, GRPH_PFLIP_INT_CLEAR),\
.status_reg = SRI(GRPH_INTERRUPT_STATUS, DCP, reg_num),\
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_INT_ENABLE,\
CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vblank_irq_info_funcs,\
.src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dce120[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_int_entry(0),
vupdate_int_entry(1),
vupdate_int_entry(2),
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dce120 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
static void dce120_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dce120;
irq_service->funcs = &irq_service_funcs_dce120;
}
struct irq_service *dal_irq_service_dce120_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dce120_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "irq_service_dcn32.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#define DCN_BASE__INST0_SEG2 0x000034C0
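/*
 * DCN uses the DCN_1_0 IH source ids: OTG VSTARTUP maps to the VBLANK
 * sources, OTG VERTICAL_INTERRUPT0 to the per-OTG VLINE0 sources, HUBP flip
 * interrupts to PFLIP, OTG V_UPDATE_NO_LOCK to VUPDATE, and the DMCUB
 * outbox-ready interrupt to DC_IRQ_SOURCE_DMCUB_OUTBOX. HPD/HPD-RX are again
 * disambiguated by ext_id.
 */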
static enum dc_irq_source to_dal_irq_source_dcn32(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs outbox_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* Compile-time expansion of the base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
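/*
 * SRI_DMUB() is the single-instance variant used for DMCUB registers, whose
 * names carry no <block><id> component.
 */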
#define SRI_DMUB(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
		reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
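/* Enable/ack wiring for the DMCUB outbox1-ready interrupt, surfaced as
 * DC_IRQ_SOURCE_DMCUB_OUTBOX.
 */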
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
.funcs = &outbox_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
[DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
[DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
dmub_outbox_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
.to_dal_irq_source = to_dal_irq_source_dcn32
};
static void dcn32_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn32;
irq_service->funcs = &irq_service_funcs_dcn32;
}
struct irq_service *dal_irq_service_dcn32_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn32_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "navi10_ip_offset.h"
#include "irq_service_dcn20.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn20(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* Compile-time expansion of the base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
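/* Per-OTG VERTICAL_INTERRUPT0 ("VLINE0") sources, exposed to DM as
 * DC_IRQ_SOURCE_DCx_VLINE0.
 */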
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
pflip_int_entry(4),
pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dcn20 = {
.to_dal_irq_source = to_dal_irq_source_dcn20
};
static void dcn20_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn20;
irq_service->funcs = &irq_service_funcs_dcn20;
}
struct irq_service *dal_irq_service_dcn20_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn20_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "irq_service_dcn10.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
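/*
 * DCN1.0 source table: only the first four page-flip sources are backed by
 * real HUBP registers here; PFLIP5/6, the underlay source and the remaining
 * legacy DCE sources are stubbed with dummy entries.
 */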
static const struct irq_source_info
irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_int_entry(5),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
hpd_rx_int_entry(5),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dcn10 = {
.to_dal_irq_source = to_dal_irq_source_dcn10
};
static void dcn10_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn10;
irq_service->funcs = &irq_service_funcs_dcn10;
}
struct irq_service *dal_irq_service_dcn10_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn10_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#include "irq_service_dcn21.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
break;
}
return DC_IRQ_SOURCE_INVALID;
}
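/*
 * hpd_ack() below acknowledges the HPD interrupt and then inverts
 * DC_HPD_INT_POLARITY based on the delayed sense status read from the
 * status register, so the opposite edge (connect vs. disconnect) raises
 * the next interrupt.
 */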
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI_DMUB(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \

#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK \

#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
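/*
 * For reference, hpd_int_entry(0) expands (roughly) to:
 *
 *     [DC_IRQ_SOURCE_HPD1 + 0] = {
 *         .enable_reg   = SRI(DC_HPD_INT_CONTROL, HPD, 0),
 *         .enable_mask  = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
 *         .enable_value = { <enable mask>, ~<enable mask> },
 *         .ack_reg      = SRI(DC_HPD_INT_CONTROL, HPD, 0),
 *         .ack_mask     = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
 *         .ack_value    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
 *         .status_reg   = SRI(DC_HPD_INT_STATUS, HPD, 0),
 *         .funcs        = &hpd_irq_info_funcs,
 *     },
 */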
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
.funcs = &dmub_outbox_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
dmub_outbox_int_entry(),
};
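/*
 * irq_source_info_dcn21[] is indexed by enum dc_irq_source; sources with no
 * register backing on this ASIC (I2C, DP sink, GPIO pad, underflow, ...) are
 * filled with dummy_irq_entry(), whose set/ack handlers are the
 * dal_irq_service_dummy_* stubs.
 */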
static const struct irq_service_funcs irq_service_funcs_dcn21 = {
.to_dal_irq_source = to_dal_irq_source_dcn21
};
static void dcn21_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn21;
irq_service->funcs = &irq_service_funcs_dcn21;
}
struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn21_irq_construct(irq_service, init_data);
return irq_service;
}
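/*
 * Minimal usage sketch (hypothetical caller; assumes init_data has already
 * been filled in by the DM layer):
 *
 *     struct irq_service *irqs = dal_irq_service_dcn21_create(&init_data);
 *
 *     if (irqs)
 *         src = irqs->funcs->to_dal_irq_source(irqs, src_id, ext_id);
 */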
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_3_1_5_offset.h"
#include "dcn/dcn_3_1_5_sh_mask.h"
#include "irq_service_dcn315.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
static enum dc_irq_source to_dal_irq_source_dcn315(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs outbox_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
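/*
 * SRI() builds the absolute register address: the segment base selected by
 * the *_BASE_IDX constant plus the per-register "reg<block><id>_<name>"
 * offset, both coming from the dcn_3_1_5 register headers included above.
 * SRI_DMUB() does the same for registers without a block/instance prefix.
 */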
#define SRI(reg_name, block, id)\
BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRI_DMUB(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \

#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK \

#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
.funcs = &outbox_irq_info_funcs\
}
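/*
 * The DMCUB outbox entry uses IRQ_REG_ENTRY_DMUB()/SRI_DMUB() because the
 * DMCUB registers are not an instanced block, so there is no <block><id>
 * prefix to paste into the field-mask names.
 */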
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn315[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
[DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
[DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
dmub_outbox_int_entry(),
};
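/*
 * Note: relative to the dcn21 table above, only four vupdate/vblank/vline0
 * instances are populated here (vs. six), and DC5/DC6 VLINE1 are explicit
 * dummy entries, presumably matching the smaller OTG count on this ASIC.
 */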
static const struct irq_service_funcs irq_service_funcs_dcn315 = {
.to_dal_irq_source = to_dal_irq_source_dcn315
};
static void dcn315_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn315;
irq_service->funcs = &irq_service_funcs_dcn315;
}
struct irq_service *dal_irq_service_dcn315_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn315_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
#include "dcn/dcn_3_1_4_offset.h"
#include "dcn/dcn_3_1_4_sh_mask.h"
#include "irq_service_dcn314.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#define DCN_BASE__INST0_SEG2 0x000034C0
static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK1;
case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK2;
case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK3;
case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK4;
case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP2;
case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP3;
case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP4;
case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP5;
case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
return DC_IRQ_SOURCE_DMCUB_OUTBOX;
case DCN_1_0__SRCID__DC_HPD1_INT:
/* generic src_id for all HPD and HPDRX interrupts */
switch (ext_id) {
case DCN_1_0__CTXID__DC_HPD1_INT:
return DC_IRQ_SOURCE_HPD1;
case DCN_1_0__CTXID__DC_HPD2_INT:
return DC_IRQ_SOURCE_HPD2;
case DCN_1_0__CTXID__DC_HPD3_INT:
return DC_IRQ_SOURCE_HPD3;
case DCN_1_0__CTXID__DC_HPD4_INT:
return DC_IRQ_SOURCE_HPD4;
case DCN_1_0__CTXID__DC_HPD5_INT:
return DC_IRQ_SOURCE_HPD5;
case DCN_1_0__CTXID__DC_HPD6_INT:
return DC_IRQ_SOURCE_HPD6;
case DCN_1_0__CTXID__DC_HPD1_RX_INT:
return DC_IRQ_SOURCE_HPD1RX;
case DCN_1_0__CTXID__DC_HPD2_RX_INT:
return DC_IRQ_SOURCE_HPD2RX;
case DCN_1_0__CTXID__DC_HPD3_RX_INT:
return DC_IRQ_SOURCE_HPD3RX;
case DCN_1_0__CTXID__DC_HPD4_RX_INT:
return DC_IRQ_SOURCE_HPD4RX;
case DCN_1_0__CTXID__DC_HPD5_RX_INT:
return DC_IRQ_SOURCE_HPD5RX;
case DCN_1_0__CTXID__DC_HPD6_RX_INT:
return DC_IRQ_SOURCE_HPD6RX;
default:
return DC_IRQ_SOURCE_INVALID;
}
break;
default:
return DC_IRQ_SOURCE_INVALID;
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static const struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
};
static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs pflip_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs outbox_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define SRI(reg_name, block, id)\
(BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name)
#define SRI_DMUB(reg_name)\
(BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name)
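/*
 * Unlike the older dcn21/dcn315 variants of these macros, SRI() and
 * SRI_DMUB() are parenthesized here so the computed address is safe to use
 * inside larger expressions.
 */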
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
.enable_mask = \
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI(reg2, block, reg_num),\
.ack_mask = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \

#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
.enable_reg = SRI_DMUB(reg1),\
.enable_mask = \
reg1 ## __ ## mask1 ## _MASK,\
.enable_value = {\
reg1 ## __ ## mask1 ## _MASK,\
~reg1 ## __ ## mask1 ## _MASK \
},\
.ack_reg = SRI_DMUB(reg2),\
.ack_mask = \
reg2 ## __ ## mask2 ## _MASK,\
.ack_value = \
reg2 ## __ ## mask2 ## _MASK \

#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_irq_info_funcs\
}
#define hpd_rx_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
IRQ_REG_ENTRY(HPD, reg_num,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
.status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
.funcs = &hpd_rx_irq_info_funcs\
}
#define pflip_int_entry(reg_num)\
[DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
IRQ_REG_ENTRY(HUBPREQ, reg_num,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
.funcs = &pflip_irq_info_funcs\
}
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
}
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
.funcs = &outbox_irq_info_funcs\
}
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
}
#define i2c_int_entry(reg_num) \
[DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
#define dp_sink_int_entry(reg_num) \
[DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
#define gpio_pad_int_entry(reg_num) \
[DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
.set = dal_irq_service_dummy_set,
.ack = dal_irq_service_dummy_ack
};
static const struct irq_source_info
irq_source_info_dcn314[DAL_IRQ_SOURCES_NUMBER] = {
[DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
hpd_int_entry(0),
hpd_int_entry(1),
hpd_int_entry(2),
hpd_int_entry(3),
hpd_int_entry(4),
hpd_rx_int_entry(0),
hpd_rx_int_entry(1),
hpd_rx_int_entry(2),
hpd_rx_int_entry(3),
hpd_rx_int_entry(4),
i2c_int_entry(1),
i2c_int_entry(2),
i2c_int_entry(3),
i2c_int_entry(4),
i2c_int_entry(5),
i2c_int_entry(6),
dp_sink_int_entry(1),
dp_sink_int_entry(2),
dp_sink_int_entry(3),
dp_sink_int_entry(4),
dp_sink_int_entry(5),
dp_sink_int_entry(6),
[DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
pflip_int_entry(0),
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
gpio_pad_int_entry(2),
gpio_pad_int_entry(3),
gpio_pad_int_entry(4),
gpio_pad_int_entry(5),
gpio_pad_int_entry(6),
gpio_pad_int_entry(7),
gpio_pad_int_entry(8),
gpio_pad_int_entry(9),
gpio_pad_int_entry(10),
gpio_pad_int_entry(11),
gpio_pad_int_entry(12),
gpio_pad_int_entry(13),
gpio_pad_int_entry(14),
gpio_pad_int_entry(15),
gpio_pad_int_entry(16),
gpio_pad_int_entry(17),
gpio_pad_int_entry(18),
gpio_pad_int_entry(19),
gpio_pad_int_entry(20),
gpio_pad_int_entry(21),
gpio_pad_int_entry(22),
gpio_pad_int_entry(23),
gpio_pad_int_entry(24),
gpio_pad_int_entry(25),
gpio_pad_int_entry(26),
gpio_pad_int_entry(27),
gpio_pad_int_entry(28),
gpio_pad_int_entry(29),
gpio_pad_int_entry(30),
dc_underflow_int_entry(1),
dc_underflow_int_entry(2),
dc_underflow_int_entry(3),
dc_underflow_int_entry(4),
dc_underflow_int_entry(5),
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
[DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
[DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
dmub_outbox_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn314 = {
.to_dal_irq_source = to_dal_irq_source_dcn314
};
static void dcn314_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
irq_service->info = irq_source_info_dcn314;
irq_service->funcs = &irq_service_funcs_dcn314;
}
struct irq_service *dal_irq_service_dcn314_create(
struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
if (!irq_service)
return NULL;
dcn314_irq_construct(irq_service, init_data);
return irq_service;
}
| linux-master | drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "dcn201_hwseq.h"
#include "dcn201_optc.h"
#include "dce/dce_hwseq.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#define DC_LOGGER \
dc->ctx->logger
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
static bool patch_address_for_sbs_tb_stereo(
struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
(pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE ||
pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
*addr = plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.left_addr =
plane_state->address.grph_stereo.right_addr;
return true;
} else {
if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
plane_state->address.grph_stereo.right_addr =
plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.right_meta_addr =
plane_state->address.grph_stereo.left_meta_addr;
}
}
return false;
}
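/*
 * gpu_addr_to_uma() below rebases a GPU frame-buffer-aperture address into
 * the UMA/system address space: addresses inside [fb_base, fb_top) are
 * shifted by (fb_offset - fb_base); addresses already inside
 * [fb_offset, uma_top] are left untouched. Returns whether the address
 * ended up in UMA.
 */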
static bool gpu_addr_to_uma(struct dce_hwseq *hwseq,
PHYSICAL_ADDRESS_LOC *addr)
{
bool is_in_uma;
if (hwseq->fb_base.quad_part <= addr->quad_part &&
addr->quad_part < hwseq->fb_top.quad_part) {
addr->quad_part -= hwseq->fb_base.quad_part;
addr->quad_part += hwseq->fb_offset.quad_part;
is_in_uma = true;
} else if (hwseq->fb_offset.quad_part <= addr->quad_part &&
addr->quad_part <= hwseq->uma_top.quad_part) {
is_in_uma = true;
} else {
is_in_uma = false;
}
return is_in_uma;
}
static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
struct dc_plane_address *addr)
{
switch (addr->type) {
case PLN_ADDR_TYPE_GRAPHICS:
gpu_addr_to_uma(hwseq, &addr->grph.addr);
gpu_addr_to_uma(hwseq, &addr->grph.meta_addr);
break;
case PLN_ADDR_TYPE_GRPH_STEREO:
gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_addr);
gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_meta_addr);
gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_addr);
gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_meta_addr);
break;
case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_addr);
gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_meta_addr);
gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_addr);
gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_meta_addr);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dce_hwseq *hws = dc->hwseq;
struct dc_plane_address uma;
if (plane_state == NULL)
return;
uma = plane_state->address;
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
plane_address_in_gpu_space_to_uma(hws, &uma);
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&uma,
plane_state->flip_immediate);
plane_state->status.requested_address = plane_state->address;
if (plane_state->flip_immediate)
plane_state->status.current_address = plane_state->address;
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
/* Blank pixel data during initialization */
void dcn201_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
uint32_t otg_active_width, otg_active_height;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
color_space_to_black_color(dc, color_space, &black_color);
/* get the OTG active size */
tg->funcs->get_otg_active_size(tg,
&otg_active_width,
&otg_active_height);
/* get the OPTC source */
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
opp = dc->res_pool->opps[opp_id_src0];
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height,
0);
hws->funcs.wait_for_blank_complete(opp);
}
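/*
 * read_mmhub_vm_setup() below caches the MMHUB frame-buffer aperture.
 * The FB LOCATION/OFFSET registers hold bits 47:24 of the byte address,
 * so each raw value is effectively in 16 MiB units: a register value of
 * 0x1 becomes byte address 0x1000000 after the quad_part <<= 24 shift.
 */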
static void read_mmhub_vm_setup(struct dce_hwseq *hws)
{
uint32_t fb_base = REG_READ(MC_VM_FB_LOCATION_BASE);
uint32_t fb_top = REG_READ(MC_VM_FB_LOCATION_TOP);
uint32_t fb_offset = REG_READ(MC_VM_FB_OFFSET);
/* MC_VM_FB_LOCATION_TOP is in pages and inclusive, so add 1 for the actual top */
fb_top++;
/* bit 23:0 in register map to bit 47:24 in address */
hws->fb_base.low_part = fb_base;
hws->fb_base.quad_part <<= 24;
hws->fb_top.low_part = fb_top;
hws->fb_top.quad_part <<= 24;
hws->fb_offset.low_part = fb_offset;
hws->fb_offset.quad_part <<= 24;
hws->uma_top.quad_part = hws->fb_top.quad_part
- hws->fb_base.quad_part + hws->fb_offset.quad_part;
}
void dcn201_init_hw(struct dc *dc)
{
int i, j;
struct dce_hwseq *hws = dc->hwseq;
struct resource_pool *res_pool = dc->res_pool;
struct dc_state *context = dc->current_state;
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
hws->funcs.bios_golden_init(dc);
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
if (res_pool->dccg && res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else {
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) {
/* Power up AND update implementation according to the
* required signal (which may be different from the
* default signal on connector).
*/
struct dc_link *link = dc->links[i];
link->link_enc->funcs->hw_init(link->link_enc);
}
if (hws->fb_offset.quad_part == 0)
read_mmhub_vm_setup(hws);
/* Blank pixel data with OPP DPG */
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg)) {
dcn201_init_blank(dc, tg);
}
}
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
}
for (i = 0; i < res_pool->pipe_count; i++) {
struct dpp *dpp = res_pool->dpps[i];
dpp->funcs->dpp_reset(dpp);
}
/* Reset all MPCC muxes */
res_pool->mpc->funcs->mpc_init(res_pool->mpc);
/* initialize OPP mpc_tree parameter */
for (i = 0; i < res_pool->res_cap->num_opp; i++) {
res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
for (j = 0; j < MAX_PIPES; j++)
res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
}
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = res_pool->hubps[i];
struct dpp *dpp = res_pool->dpps[i];
pipe_ctx->stream_res.tg = tg;
pipe_ctx->pipe_idx = i;
pipe_ctx->plane_res.hubp = hubp;
pipe_ctx->plane_res.dpp = dpp;
pipe_ctx->plane_res.mpcc_inst = dpp->inst;
hubp->mpcc_id = dpp->inst;
hubp->opp_id = OPP_ID_INVALID;
hubp->power_gated = false;
pipe_ctx->stream_res.opp = NULL;
hubp->funcs->hubp_init(hubp);
res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = res_pool->opps[i];
/* TODO: number of MPCCs != number of OPPs */
hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
for (i = 0; i < res_pool->res_cap->num_dwb; i++)
res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
}
for (i = 0; i < res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
dc->hwss.disable_plane(dc, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
}
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = res_pool->timing_generators[i];
tg->funcs->tg_init(tg);
}
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
}
/* Trigger HW to start disconnecting the plane from the stream on the next vsync */
void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params;
struct mpcc *mpcc_to_remove = NULL;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
bool mpcc_removed = false;
mpc_tree_params = &(opp->mpc_tree_params);
/* check if this plane is being used by an MPCC in the secondary blending chain */
if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
/* remove MPCC from secondary if being used */
if (mpcc_to_remove != NULL && mpc->funcs->remove_mpcc_from_secondary) {
mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, mpcc_to_remove);
mpcc_removed = true;
}
/* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
if (mpcc_to_remove != NULL) {
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
mpcc_removed = true;
}
/* Already reset */
if (mpcc_removed == false)
return;
if (opp != NULL)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg;
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id, dpp_id;
struct mpcc *new_mpcc;
struct mpcc *remove_mpcc = NULL;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else {
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space,
&blnd_cfg.black_color);
}
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
blnd_cfg.overlap_only = false;
if (pipe_ctx->plane_state->global_alpha_value)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
blnd_cfg.global_alpha = 0xff;
blnd_cfg.global_gain = 0xff;
blnd_cfg.background_color_bpc = 4;
blnd_cfg.bottom_gain_mode = 0;
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
/* the input to the MPCC is RGB */
blnd_cfg.black_color.color_b_cb = 0;
blnd_cfg.black_color.color_g_y = 0;
blnd_cfg.black_color.color_r_cr = 0;
/* DCN1.0 has output CM before MPC which seems to screw with
* pre-multiplied alpha. This is a w/a hopefully unnecessary for DCN2.
*/
blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
* on resume from hibernate, BIOS sets up MPCC0, and
* we do mpcc_remove but the mpcc cannot go to idle
* after remove. This cause us to pick mpcc1 here,
* which causes a pstate hang for yet unknown reason.
*/
dpp_id = hubp->inst;
mpcc_id = dpp_id;
/* If there is no full update, there is no need to touch the MPC tree */
if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
return;
}
/* check if this plane is being used by an MPCC in the secondary blending chain */
if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
remove_mpcc = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
/* remove MPCC from secondary if being used */
if (remove_mpcc != NULL && mpc->funcs->remove_mpcc_from_secondary)
mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, remove_mpcc);
/* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
remove_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
/* remove MPCC if being used */
if (remove_mpcc != NULL)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, remove_mpcc);
else
if (dc->debug.sanity_checks)
mpc->funcs->assert_mpcc_idle_before_connect(
dc->res_pool->mpc, mpcc_id);
/* Call MPC to insert new plane */
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
mpc_tree_params,
&blnd_cfg,
NULL,
NULL,
dpp_id,
mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
hubp->mpcc_id = mpcc_id;
}
void dcn201_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
{
struct dce_hwseq *hws = dc->hwseq;
/* use the TG master update lock to lock everything on the TG,
* therefore only the top pipe needs to lock
*/
if (pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
if (lock)
pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
else
pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
} else {
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
else
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
}
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq, &attributes->address);
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.dpp, attributes);
}
void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
{
struct dc_dmdata_attributes attr = { 0 };
struct hubp *hubp = pipe_ctx->plane_res.hubp;
gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq,
&pipe_ctx->stream->dmdata_address);
attr.dmdata_mode = DMDATA_HW_MODE;
attr.dmdata_size =
dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
attr.address.quad_part =
pipe_ctx->stream->dmdata_address.quad_part;
attr.dmdata_dl_delta = 0;
attr.dmdata_qos_mode = 0;
attr.dmdata_qos_level = 0;
attr.dmdata_repeat = 1; /* always repeat */
attr.dmdata_updated = 1;
attr.dmdata_sw_data = NULL;
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
/* check whether it is half the rate */
if (optc201_is_two_pixels_per_containter(&stream->timing))
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, true);
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn201_dpp.h"
#include "basics/conversion.h"
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
static void dpp201_cnv_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
{
struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base);
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
bool force_disable_cursor = false;
uint32_t is_2bit = 0;
REG_SET_2(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_EXPANSION_MODE, mode);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
pixel_format = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
pixel_format = 3;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
pixel_format = 8;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
pixel_format = 10;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
pixel_format = 22;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
pixel_format = 25;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
alpha_en = 0;
break;
default:
break;
}
/* Set default color space based on format if none is given. */
color_space = input_color_space ? input_color_space : color_space;
if (is_2bit == 1 && alpha_2bit_lut != NULL) {
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
}
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
dpp1_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, 0);
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, 0);
}
dpp2_power_on_obuf(dpp_base, true);
}
#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
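/*
 * Pick horizontal/vertical (and chroma) tap counts for the DSCL scaler.
 * Caller-supplied taps are honoured when non-zero; otherwise 4 or 8 luma
 * taps (2 or 4 for chroma) are chosen from the scaling ratio.  Ratios of
 * exactly 1.0 in u3.19 fixed point collapse to a single tap unless
 * debug.always_scale is set.  Returns false for scaling configurations
 * this DPP cannot support.
 */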
static bool dpp201_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
scl_data->format == PIXEL_FORMAT_FP16)
return false;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
if (scl_data->ratios.horz.value == (8ll << 32))
scl_data->ratios.horz.value--;
if (scl_data->ratios.vert.value == (8ll << 32))
scl_data->ratios.vert.value--;
if (scl_data->ratios.horz_c.value == (8ll << 32))
scl_data->ratios.horz_c.value--;
if (scl_data->ratios.vert_c.value == (8ll << 32))
scl_data->ratios.vert_c.value--;
if (in_taps->h_taps == 0) {
if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
scl_data->taps.h_taps = 8;
else
scl_data->taps.h_taps = 4;
} else
scl_data->taps.h_taps = in_taps->h_taps;
if (in_taps->v_taps == 0) {
if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
scl_data->taps.v_taps = 8;
else
scl_data->taps.v_taps = 4;
} else
scl_data->taps.v_taps = in_taps->v_taps;
if (in_taps->v_taps_c == 0) {
if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
scl_data->taps.v_taps_c = 4;
else
scl_data->taps.v_taps_c = 2;
} else
scl_data->taps.v_taps_c = in_taps->v_taps_c;
if (in_taps->h_taps_c == 0) {
if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
scl_data->taps.h_taps_c = 4;
else
scl_data->taps.h_taps_c = 2;
} else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
if (IDENTITY_RATIO(scl_data->ratios.horz))
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
scl_data->taps.v_taps_c = 1;
}
return true;
}
static struct dpp_funcs dcn201_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
.dpp_set_degamma = dpp2_set_degamma,
.dpp_program_input_lut = dpp2_dummy_program_input_lut,
.dpp_full_bypass = dpp1_full_bypass,
.dpp_setup = dpp201_cnv_setup,
.dpp_program_degamma_pwl = dpp2_set_degamma_pwl,
.dpp_program_blnd_lut = dpp20_program_blnd_lut,
.dpp_program_shaper_lut = dpp20_program_shaper,
.dpp_program_3dlut = dpp20_program_3dlut,
.dpp_program_bias_and_scale = NULL,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp2_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
};
static struct dpp_caps dcn201_dpp_cap = {
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
.dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
};
bool dpp201_construct(
struct dcn201_dpp *dpp,
struct dc_context *ctx,
uint32_t inst,
const struct dcn201_dpp_registers *tf_regs,
const struct dcn201_dpp_shift *tf_shift,
const struct dcn201_dpp_mask *tf_mask)
{
dpp->base.ctx = ctx;
dpp->base.inst = inst;
dpp->base.funcs = &dcn201_dpp_funcs;
dpp->base.caps = &dcn201_dpp_cap;
dpp->tf_regs = tf_regs;
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
LB_PIXEL_DEPTH_30BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn201_dccg.h"
#include "reg_helper.h"
#include "core_types.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
#define REG(reg) \
(dccg_dcn->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
#define CTX \
dccg_dcn->base.ctx
#define DC_LOGGER \
dccg->ctx->logger
static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst,
int req_dppclk)
{
/* vbios handles it */
}
static const struct dccg_funcs dccg201_funcs = {
.update_dpp_dto = dccg201_update_dpp_dto,
.get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
.dccg_init = dccg2_init
};
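/*
 * Allocate and initialize a DCN2.01 DCCG instance.  DPP DTO programming
 * is a no-op here since the VBIOS handles it on this ASIC.
 */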
struct dccg *dccg201_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
base = &dccg_dcn->base;
base->ctx = ctx;
base->funcs = &dccg201_funcs;
dccg_dcn->regs = regs;
dccg_dcn->dccg_shift = dccg_shift;
dccg_dcn->dccg_mask = dccg_mask;
return &dccg_dcn->base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn201_mpc.h"
#define REG(reg)\
mpc201->mpc_regs->reg
#define CTX \
mpc201->base.ctx
#define DC_LOGGER \
mpc201->base.ctx->logger
#undef FN
#define FN(reg_name, field_name) \
mpc201->mpc_shift->field_name, mpc201->mpc_mask->field_name
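/*
 * Program output rate control for the given OPP mux; when a flow_control
 * descriptor is provided, the DWB flow-control mode and counters are
 * programmed as well.
 */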
static void mpc201_set_out_rate_control(
struct mpc *mpc,
int opp_id,
bool enable,
bool rate_2x_mode,
struct mpc_dwb_flow_control *flow_control)
{
struct dcn201_mpc *mpc201 = TO_DCN201_MPC(mpc);
REG_UPDATE_2(MUX[opp_id],
MPC_OUT_RATE_CONTROL_DISABLE, !enable,
MPC_OUT_RATE_CONTROL, rate_2x_mode);
if (flow_control)
REG_UPDATE_3(MUX[opp_id],
MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
MPC_OUT_FLOW_CONTROL_COUNT0, flow_control->flow_ctrl_cnt0,
MPC_OUT_FLOW_CONTROL_COUNT1, flow_control->flow_ctrl_cnt1);
}
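/*
 * Reset a single MPCC to its disconnected defaults: no DPP attached
 * (dpp_id 0xf), no bottom layer, opaque global alpha/gain and the default
 * blend gains.
 */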
static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
mpcc->mpcc_bot = NULL;
mpcc->blnd_cfg.overlap_only = false;
mpcc->blnd_cfg.global_alpha = 0xff;
mpcc->blnd_cfg.global_gain = 0xff;
mpcc->blnd_cfg.background_color_bpc = 4;
mpcc->blnd_cfg.bottom_gain_mode = 0;
mpcc->blnd_cfg.top_gain = 0x1f000;
mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
mpcc->sm_cfg.enable = false;
mpcc->shared_bottom = false;
}
static const struct mpc_funcs dcn201_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
.get_mpcc_for_dpp_from_secondary = NULL,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.set_denorm = mpc2_set_denorm,
.set_denorm_clamp = mpc2_set_denorm_clamp,
.set_output_csc = mpc2_set_output_csc,
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.set_out_rate_control = mpc201_set_out_rate_control,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
};
void dcn201_mpc_construct(struct dcn201_mpc *mpc201,
struct dc_context *ctx,
const struct dcn201_mpc_registers *mpc_regs,
const struct dcn201_mpc_shift *mpc_shift,
const struct dcn201_mpc_mask *mpc_mask,
int num_mpcc)
{
int i;
mpc201->base.ctx = ctx;
mpc201->base.funcs = &dcn201_mpc_funcs;
mpc201->mpc_regs = mpc_regs;
mpc201->mpc_shift = mpc_shift;
mpc201->mpc_mask = mpc_mask;
mpc201->mpcc_in_use_mask = 0;
mpc201->num_mpcc = num_mpcc;
for (i = 0; i < MAX_MPCC; i++)
mpc201_init_mpcc(&mpc201->base.mpcc_array[i], i);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn201_optc.h"
#include "dcn10/dcn10_optc.h"
#include "dc.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
/* TEMP: Need to figure out inheritance model here. */
bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
return optc1_is_two_pixels_per_containter(timing);
}
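/*
 * Take the OTG master update lock (selecting this OTG and enabling the
 * VUPDATE keepout) and wait for the lock status to latch.
 */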
static void optc201_triplebuffer_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_VUPDATE_KEEPOUT, 0,
OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
}
static void optc201_triplebuffer_unlock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
REG_SET(OTG_VUPDATE_KEEPOUT, 0,
OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
}
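/*
 * Reject timings this OTG cannot generate: unsupported 3D formats, h/v
 * totals above the register field limits, or blank/sync widths below the
 * minimums set in dcn201_timing_generator_init().
 */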
static bool optc201_validate_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
ASSERT(timing != NULL);
v_blank = (timing->v_total - timing->v_addressable -
timing->v_border_top - timing->v_border_bottom);
h_blank = (timing->h_total - timing->h_addressable -
timing->h_border_right -
timing->h_border_left);
if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
return false;
/* Check the maximum number of pixels supported by the timing generator
 * (currently this can never fail: failing would require a display with
 * more than 8192 horizontal and more than 8192 vertical total pixels)
 */
if (timing->h_total > optc1->max_h_total ||
timing->v_total > optc1->max_v_total)
return false;
if (h_blank < optc1->min_h_blank)
return false;
if (timing->h_sync_width < optc1->min_h_sync_width ||
timing->v_sync_width < optc1->min_v_sync_width)
return false;
min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank;
if (v_blank < min_v_blank)
return false;
return true;
}
static void optc201_get_optc_source(struct timing_generator *optc,
uint32_t *num_of_src_opp,
uint32_t *src_opp_id_0,
uint32_t *src_opp_id_1)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OPTC_DATA_SOURCE_SELECT,
OPTC_SEG0_SRC_SEL, src_opp_id_0);
*num_of_src_opp = 1;
}
static struct timing_generator_funcs dcn201_tg_funcs = {
.validate_timing = optc201_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc2_enable_crtc,
.disable_crtc = optc1_disable_crtc,
/* used by enable_timing_synchronization. Not needed for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
.set_blank = optc1_set_blank,
.is_blanked = optc1_is_blanked,
.set_blank_color = optc1_program_blank_color,
.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
.enable_reset_trigger = optc1_enable_reset_trigger,
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.triplebuffer_lock = optc201_triplebuffer_lock,
.triplebuffer_unlock = optc201_triplebuffer_unlock,
.lock = optc1_lock,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
.tg_init = optc1_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
.set_dwb_source = NULL,
.get_optc_source = optc201_get_optc_source,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
};
void dcn201_timing_generator_init(struct optc *optc1)
{
optc1->base.funcs = &dcn201_tg_funcs;
optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 8;
optc1->min_v_sync_width = 1;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn201_hwseq.h"
#include "dcn201_init.h"
static const struct hw_sequencer_funcs dcn201_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn201_init_hw,
.power_down_on_boot = NULL,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn201_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
.program_output_csc = dcn20_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dcn201_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn201_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.program_triplebuffer = dcn20_program_triple_buffer,
.dmdata_status_done = dcn20_dmdata_status_done,
.set_dmdata_attributes = dcn201_set_dmdata_attributes,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn201_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn201_private_funcs = {
.init_pipes = NULL,
.update_plane_addr = dcn201_update_plane_addr,
.plane_atomic_disconnect = dcn201_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn201_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
.set_output_transfer_func = dcn20_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn201_init_blank,
.disable_vga = dcn10_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn10_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn10_enable_power_gating_plane,
.dpp_pg_control = dcn10_dpp_pg_control,
.hubp_pg_control = dcn10_hubp_pg_control,
.dsc_pg_control = NULL,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
};
void dcn201_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dcn201_funcs;
dc->hwseq->funcs = dcn201_private_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn201_init.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn201_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn201_mpc.h"
#include "dcn201_hubp.h"
#include "irq/dcn201/irq_service_dcn201.h"
#include "dcn201/dcn201_dpp.h"
#include "dcn201/dcn201_hubbub.h"
#include "dcn201_dccg.h"
#include "dcn201_optc.h"
#include "dcn201_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn201_opp.h"
#include "dcn201/dcn201_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
#include "dcn/dcn_2_0_3_offset.h"
#include "dcn/dcn_2_0_3_sh_mask.h"
#include "dpcs/dpcs_2_0_3_offset.h"
#include "dpcs/dpcs_2_0_3_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "reg_helper.h"
#define MIN_DISP_CLK_KHZ 100000
#define MIN_DPP_CLK_KHZ 100000
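/*
 * DML IP parameters for DCN2.01 (Cyan Skillfish): buffer sizes, scaler
 * limits and pipeline delays consumed by the display mode library.
 */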
static struct _vcs_dpi_ip_params_st dcn201_ip = {
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 12,
.writeback_max_vscl_taps = 12,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 9600,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 2,
.max_num_dpp = 4,
.max_num_wb = 0,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 8,
.max_vscl_ratio = 8,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 30,
.dppclk_delay_subtotal = 77,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 8,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 87,
.dcfclk_cstate_latency = 10,
.max_inter_dcn_tile_repeaters = 8,
.number_of_cursors = 1,
};
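/*
 * DML SoC bounding box for DCN2.01: per-state clock limits plus latency
 * and bandwidth-derating parameters used during mode validation.
 */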
static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.clock_limits = {
{
.state = 0,
.dscclk_mhz = 400.0,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 200.0,
.dispclk_mhz = 300.0,
.dppclk_mhz = 300.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1254.0,
.dram_speed_mts = 2000.0,
},
{
.state = 1,
.dscclk_mhz = 400.0,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 250.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1254.0,
.dram_speed_mts = 3600.0,
},
{
.state = 2,
.dscclk_mhz = 400.0,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 750.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1254.0,
.dram_speed_mts = 6800.0,
},
{
.state = 3,
.dscclk_mhz = 400.0,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 250.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1254.0,
.dram_speed_mts = 14000.0,
},
{
.state = 4,
.dscclk_mhz = 400.0,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 750.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1254.0,
.dram_speed_mts = 14000.0,
}
},
.num_states = 4,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 256,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 256,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_dram_bw_use_normal_percent = 69.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 80.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.3,
.downspread_percent = 0.3,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 16,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 250.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3000,
.use_urgent_burst_bw = 0,
};
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_TOTAL_DCN201
};
/* begin *********************
 * macros to expand the register list macros defined in the HW object header files */
/* DCN */
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI_IX(reg_name, block, id)\
.reg_name = ix ## block ## id ## _ ## reg_name
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## _ ## block ## id
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
mmMM ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN201(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN2_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN_COMMON_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid) \
}
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
};
#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN201(id),\
}
static const struct dcn10_ipp_registers ipp_regs[] = {
ipp_regs(0),
ipp_regs(1),
ipp_regs(2),
ipp_regs(3),
};
static const struct dcn10_ipp_shift ipp_shift = {
IPP_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn10_ipp_mask ipp_mask = {
IPP_MASK_SH_LIST_DCN201(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN201(id),\
}
static const struct dcn201_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
};
static const struct dcn201_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn201_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN201(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1)
};
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN201(id),\
}
static const struct dcn201_dpp_registers tf_regs[] = {
tf_regs(0),
tf_regs(1),
tf_regs(2),
tf_regs(3),
};
static const struct dcn201_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN201(__SHIFT)
};
static const struct dcn201_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN201(_MASK)
};
static const struct dcn201_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN201(0),
MPC_REG_LIST_DCN201(1),
MPC_REG_LIST_DCN201(2),
MPC_REG_LIST_DCN201(3),
MPC_REG_LIST_DCN201(4),
MPC_OUT_MUX_REG_LIST_DCN201(0),
MPC_OUT_MUX_REG_LIST_DCN201(1),
};
static const struct dcn201_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn201_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN201(_MASK)
};
#define tg_regs_dcn201(id)\
[id] = {TG_COMMON_REG_LIST_DCN201(id)}
static const struct dcn_optc_registers tg_regs[] = {
tg_regs_dcn201(0),
tg_regs_dcn201(1)
};
static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN201(_MASK)
};
#define hubp_regsDCN201(id)\
[id] = {\
HUBP_REG_LIST_DCN201(id)\
}
static const struct dcn201_hubp_registers hubp_regs[] = {
hubp_regsDCN201(0),
hubp_regsDCN201(1),
hubp_regsDCN201(2),
hubp_regsDCN201(3)
};
static const struct dcn201_hubp_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn201_hubp_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN201(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN201(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN201(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN201(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_COMMON_REG_LIST_DCN_BASE()
};
static const struct dccg_shift dccg_shift = {
DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK)
};
static const struct resource_caps res_cap_dnc201 = {
.num_timing_generator = 2,
.num_opp = 2,
.num_video_plane = 4,
.num_audio = 2,
.num_stream_encoder = 2,
.num_pll = 2,
.num_ddc = 2,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = true,
.p010 = false,
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 250
},
64,
64
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.az_endpoint_mute_only = true,
.max_downscale_src_width = 3840,
.disable_pplib_wm_range = true,
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = false,
.enable_legacy_fast_update = true,
};
static void dcn201_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN201_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn201_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn201_dpp *dpp =
kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
if (!dpp)
return NULL;
if (dpp201_construct(dpp, ctx, inst,
&tf_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
kfree(dpp);
return NULL;
}
static struct input_pixel_processor *dcn201_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
if (!ipp) {
return NULL;
}
dcn20_ipp_construct(ipp, ctx, inst,
&ipp_regs[inst], &ipp_shift, &ipp_mask);
return &ipp->base;
}
static struct output_pixel_processor *dcn201_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn201_opp *opp =
kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
if (!opp) {
return NULL;
}
dcn201_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
{
struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
GFP_ATOMIC);
if (!mpc201)
return NULL;
dcn201_mpc_construct(mpc201, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
num_mpcc);
return &mpc201->base;
}
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
GFP_ATOMIC);
if (!hubbub)
return NULL;
hubbub201_construct(hubbub, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask);
return &hubbub->base;
}
static struct timing_generator *dcn201_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_ATOMIC);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &tg_regs[instance];
tgn10->tg_shift = &tg_shift;
tgn10->tg_mask = &tg_mask;
dcn201_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn201_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
struct dcn10_link_encoder *enc10;
if (!enc20)
return NULL;
enc10 = &enc20->enc10;
dcn201_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc10->base;
}
static struct clock_source *dcn201_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
if (!clk_src)
return NULL;
if (dce112_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
return NULL;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn201_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct stream_encoder *dcn201_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
if (!enc1)
return NULL;
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN201_REG_LIST()
};
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN201_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN201_MASK_SH_LIST(_MASK)
};
static struct dce_hwseq *dcn201_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn201_create_audio,
.create_stream_encoder = dcn201_stream_encoder_create,
.create_hwseq = dcn201_hwseq_create,
};
static void dcn201_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
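/*
 * Free every object owned by the DCN2.01 resource pool: stream encoders,
 * MPC, HUBBUB, pipes, OPPs, OTGs, audio, clock sources, DCCG and the IRQ
 * service.
 */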
static void dcn201_resource_destruct(struct dcn201_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN201_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn201_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn201_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn201_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
static struct hubp *dcn201_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn201_hubp *hubp201 =
kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
if (!hubp201)
return NULL;
if (dcn201_hubp_construct(hubp201, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp201->base;
kfree(hubp201);
return NULL;
}
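/*
 * Acquire a free secondary DPP pipe for an extra plane on a stream,
 * inheriting the stream resources (TG, OPP) from its OTG master pipe.
 */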
static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
struct resource_context *res_ctx = &new_ctx->res_ctx;
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe)
ASSERT(0);
if (!idle_pipe)
return NULL;
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
return idle_pipe;
}
static bool dcn201_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
dc->res_pool->hubbub,
input,
output);
}
static void dcn201_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes)
{
DC_FP_START();
dcn201_populate_dml_writeback_from_context_fpu(dc, res_ctx, pipes);
DC_FP_END();
}
static void dcn201_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool);
dcn201_resource_destruct(dcn201_pool);
kfree(dcn201_pool);
*pool = NULL;
}
static void dcn201_link_init(struct dc_link *link)
{
if (link->ctx->dc_bios->integrated_info)
link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control;
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn201_get_dcc_compression_cap,
};
static struct resource_funcs dcn201_res_pool_funcs = {
.link_init = dcn201_link_init,
.destroy = dcn201_destroy_resource_pool,
.link_enc_create = dcn201_link_encoder_create,
.panel_cntl_create = NULL,
.validate_bandwidth = dcn20_validate_bandwidth,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = NULL,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
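/*
 * Build the DCN2.01 resource pool: caps, clock sources, DCCG, DML
 * instance, IRQ service and the per-pipe HUBP/IPP/DPP plus OPP/OTG
 * objects, then run the common resource constructor and hook up the
 * hardware sequencer.
 */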
static bool dcn201_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn201_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dnc201;
pool->base.funcs = &dcn201_res_pool_funcs;
/*************************************************
* Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = 4;
pool->base.mpcc_count = 5;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 1;
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.dgam_rom_caps.pq = 0;
dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
dc->caps.color.dpp.post_csc = 0;
dc->caps.color.dpp.gamma_corr = 0;
dc->caps.color.dpp.dgam_rom_for_yuv = 1;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN2
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 0;
dc->caps.color.mpc.num_3dluts = 0;
dc->caps.color.mpc.shared_3d_lut = 0;
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->debug = debug_defaults_drv;
/*a0 only, remove later*/
dc->work_arounds.no_connect_phy_config = true;
dc->work_arounds.dedcn20_305_wa = true;
/*************************************************
* Create resources *
*************************************************/
pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
dcn201_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
dcn201_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201;
/* todo: do not reuse the phy_pll registers */
pool->base.dp_clock_source =
dcn201_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
goto create_fail;
}
}
pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
goto create_fail;
}
dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn201_ip.max_num_dpp = pool->base.pipe_count;
dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201);
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn201_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
}
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn201_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
dm_error(
"DC: failed to create memory input!\n");
goto create_fail;
}
pool->base.ipps[i] = dcn201_ipp_create(ctx, i);
if (pool->base.ipps[i] == NULL) {
dm_error(
"DC: failed to create input pixel processor!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn201_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn201_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn201_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn201_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count);
if (pool->base.mpc == NULL) {
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
pool->base.hubbub = dcn201_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
dcn201_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
return true;
create_fail:
dcn201_resource_destruct(pool);
return false;
}
struct resource_pool *dcn201_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn201_resource_pool *pool =
kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
if (!pool)
return NULL;
if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn201_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger
#define REG(reg)\
(enc10->link_regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name
#define IND_REG(index) \
(enc10->link_regs->index)
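/* On a USB-C combo PHY, when DP alt mode is not disabled
 * (RDPCS_PHY_DPALT_DISABLE == 0) and the 4-lane alt-mode configuration is not
 * reported (RDPCS_PHY_DPALT_DP4 == 0), only two lanes are routed to DP, so the
 * lane count reported by the base implementation is capped at two.
 */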
static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
uint32_t value1, value2;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
REG_GET_2(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value1,
RDPCS_PHY_DPALT_DP4, &value2);
/* only limit lane count on USB-C combo PHYs */
if (enc->usbc_combo_phy) {
if (!value1 && !value2 && link_settings->lane_count > LANE_COUNT_TWO)
link_settings->lane_count = LANE_COUNT_TWO;
}
}
static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
uint32_t value;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
REG_GET(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value);
// if value == 1 alt mode is disabled, otherwise it is enabled
return !value;
}
static const struct link_encoder_funcs dcn201_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn10_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.destroy = dcn10_link_encoder_destroy,
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.get_dig_frontend = dcn10_get_dig_frontend,
.fec_is_active = enc2_fec_is_active,
.is_in_alt_mode = dcn201_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn201_link_encoder_get_max_link_cap,
};
void dcn201_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
struct dcn10_link_encoder *enc10 = &enc20->enc10;
enc10->base.funcs = &dcn201_link_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether the driver polls the I2C data pin
* while doing DP sink detection
*/
/* if (dal_adapter_service_is_feature_supported(as,
* FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
* enc10->base.features.flags.bits.
* DP_SINK_DETECT_POLL_DATA_PIN = true;
*/
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns a DIG_FE 1:1 mapped to its DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FEs to non-MST UNIPHYs first and to MST last, so the
* preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS.
* The preferred DIG assignment is decided by board design.
* For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
* and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
* Because of this, adding DIG G should not hurt DCE 8.0.
* This lets DCE 8.1 share as much as possible with DCE 8.0.
*/
enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;
switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c |
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn201_hubp.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "reg_helper.h"
#include "basics/conversion.h"
#define REG(reg)\
hubp201->hubp_regs->reg
#define CTX \
hubp201->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubp201->hubp_shift->field_name, hubp201->hubp_mask->field_name
static void hubp201_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size(hubp, format, plane_size, dcc);
hubp1_program_pixel_format(hubp, format);
}
static void hubp201_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
}
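/* Program the HUBP request generator: plane 1 base address in the return
 * buffer, request queue expansion modes, and the per-plane (luma/chroma)
 * chunk sizes, meta chunk sizes and swath height.
 */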
static void hubp201_program_requestor(struct hubp *hubp,
struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp);
REG_UPDATE(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
REG_SET_4(DCN_EXPANSION_MODE, 0,
DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height);
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG_C, 0,
CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height);
}
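/* Top-level per-pipe HUBP setup: program VREADY positioning relative to
 * VSYNC, then the request generator and the DLG/TTU deadline registers.
 */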
static void hubp201_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp201_program_requestor(hubp, rq_regs);
hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
}
static struct hubp_funcs dcn201_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp201_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp201_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = min_set_viewport,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.dmdata_set_attributes = hubp2_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp2_read_state,
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
};
bool dcn201_hubp_construct(
struct dcn201_hubp *hubp201,
struct dc_context *ctx,
uint32_t inst,
const struct dcn201_hubp_registers *hubp_regs,
const struct dcn201_hubp_shift *hubp_shift,
const struct dcn201_hubp_mask *hubp_mask)
{
hubp201->base.funcs = &dcn201_hubp_funcs;
hubp201->base.ctx = ctx;
hubp201->hubp_regs = hubp_regs;
hubp201->hubp_shift = hubp_shift;
hubp201->hubp_mask = hubp_mask;
hubp201->base.inst = inst;
hubp201->base.opp_id = OPP_ID_INVALID;
hubp201->base.mpcc_id = 0xf;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn201_opp.h"
#include "reg_helper.h"
#define REG(reg) \
(oppn201->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
oppn201->opp_shift->field_name, oppn201->opp_mask->field_name
#define CTX \
oppn201->base.ctx
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
static struct opp_funcs dcn201_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
.opp_program_stereo = opp1_program_stereo,
.opp_pipe_clock_control = opp1_pipe_clock_control,
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
};
void dcn201_opp_construct(struct dcn201_opp *oppn201,
struct dc_context *ctx,
uint32_t inst,
const struct dcn201_opp_registers *regs,
const struct dcn201_opp_shift *opp_shift,
const struct dcn201_opp_mask *opp_mask)
{
oppn201->base.ctx = ctx;
oppn201->base.inst = inst;
oppn201->base.funcs = &dcn201_opp_funcs;
oppn201->regs = regs;
oppn201->opp_shift = opp_shift;
oppn201->opp_mask = opp_mask;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn201_hubbub.h"
#include "reg_helper.h"
#define REG(reg)\
hubbub1->regs->reg
#define DC_LOGGER \
hubbub1->base.ctx->logger
#define CTX \
hubbub1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
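/* Program urgent and p-state watermarks, set the arbiter saturation level
 * (expressed here as 60 us worth of refclk cycles) and the minimum DF
 * request outstanding count, then allow self-refresh unless stutter is
 * disabled via debug options. Returns true if a watermark update is still
 * pending.
 */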
static bool hubbub201_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
static const struct hubbub_funcs hubbub201_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = NULL,
.init_vm_ctx = NULL,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub201_program_watermarks,
.hubbub_read_state = hubbub2_read_state,
};
void hubbub201_construct(struct dcn20_hubbub *hubbub,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask)
{
hubbub->base.ctx = ctx;
hubbub->base.funcs = &hubbub201_funcs;
hubbub->regs = hubbub_regs;
hubbub->shifts = hubbub_shift;
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
hubbub->detile_buf_size = 164 * 1024;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn31/dcn31_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn316_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
#include "dcn31/dcn31_resource.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn31/dcn31_hubp.h"
#include "irq/dcn31/irq_service_dcn31.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn31/dcn31_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn30/dcn30_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
#include "dcn31/dcn31_afmt.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
#include "dcn/dcn_3_1_6_offset.h"
#include "dcn/dcn_3_1_6_sh_mask.h"
#include "dpcs/dpcs_4_2_3_offset.h"
#include "dpcs/dpcs_4_2_3_sh_mask.h"
#define regBIF_BX1_BIOS_SCRATCH_2 0x003a
#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 1
#define regBIF_BX1_BIOS_SCRATCH_3 0x003b
#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 1
#define regBIF_BX1_BIOS_SCRATCH_6 0x003e
#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 1
#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6
#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define DPCS_BASE__INST0_SEG0 0x00000012
#define DPCS_BASE__INST0_SEG1 0x000000C0
#define DPCS_BASE__INST0_SEG2 0x000034C0
#define DPCS_BASE__INST0_SEG3 0x00009000
#define DPCS_BASE__INST0_SEG4 0x02403C00
#define DPCS_BASE__INST0_SEG5 0
#define NBIO_BASE__INST0_SEG0 0x00000000
#define NBIO_BASE__INST0_SEG1 0x00000014
#define NBIO_BASE__INST0_SEG2 0x00000D20
#define NBIO_BASE__INST0_SEG3 0x00010400
#define NBIO_BASE__INST0_SEG4 0x0241B000
#define NBIO_BASE__INST0_SEG5 0x04040000
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "link_enc_cfg.h"
#define DCN3_16_MAX_DET_SIZE 384
#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_CRB_SEGMENT_SIZE_KB 64
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
DCN31_CLK_SRC_PLL2,
DCN31_CLK_SRC_PLL3,
DCN31_CLK_SRC_PLL4,
DCN30_CLK_SRC_TOTAL
};
/* begin *********************
* macros to expand register list macros defined in HW object header files
*/
/* DCN */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
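/* Illustrative expansion (example register name): SRI(OTG_CONTROL, OTG, 0)
 * becomes .OTG_CONTROL = BASE(regOTG0_OTG_CONTROL_BASE_IDX) + regOTG0_OTG_CONTROL,
 * i.e. the segment base from DCN_BASE__INST0_SEG* plus the per-block register offset.
 */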
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
reg ## reg_name ## _ ## block ## id
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
regBIF_BX1_ ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define abm_regs(id)\
[id] = {\
ABM_DCN302_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
abm_regs(1),
abm_regs(2),
abm_regs(3),
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN30(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
audio_regs(6)
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define vpg_regs(id)\
[id] = {\
VPG_DCN31_REG_LIST(id)\
}
static const struct dcn31_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
vpg_regs(3),
vpg_regs(4),
vpg_regs(5),
vpg_regs(6),
vpg_regs(7),
vpg_regs(8),
vpg_regs(9),
};
static const struct dcn31_vpg_shift vpg_shift = {
DCN31_VPG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_vpg_mask vpg_mask = {
DCN31_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
AFMT_DCN31_REG_LIST(id)\
}
static const struct dcn31_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
afmt_regs(3),
afmt_regs(4),
afmt_regs(5)
};
static const struct dcn31_afmt_shift afmt_shift = {
DCN31_AFMT_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_afmt_mask afmt_mask = {
DCN31_AFMT_MASK_SH_LIST(_MASK)
};
#define apg_regs(id)\
[id] = {\
APG_DCN31_REG_LIST(id)\
}
static const struct dcn31_apg_registers apg_regs[] = {
apg_regs(0),
apg_regs(1),
apg_regs(2),
apg_regs(3)
};
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN3_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4)
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN31_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
DPCS_DCN31_REG_LIST(id), \
}
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
DPCS_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
DPCS_DCN31_MASK_SH_LIST(_MASK)
};
#define hpo_dp_stream_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
}
static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
hpo_dp_stream_encoder_reg_list(3),
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
};
#define hpo_dp_link_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
DCN3_1_RDPCSTX_REG_LIST(0),\
DCN3_1_RDPCSTX_REG_LIST(1),\
DCN3_1_RDPCSTX_REG_LIST(2),\
DCN3_1_RDPCSTX_REG_LIST(3),\
DCN3_1_RDPCSTX_REG_LIST(4)\
}
static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
hpo_dp_link_encoder_reg_list(0),
hpo_dp_link_encoder_reg_list(1),
};
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
}
static const struct dcn3_dpp_registers dpp_regs[] = {
dpp_regs(0),
dpp_regs(1),
dpp_regs(2),
dpp_regs(3)
};
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
};
static const struct dcn3_dpp_mask tf_mask = {
DPP_REG_LIST_SH_MASK_DCN30(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN30(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3)
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4)
};
#define dwbc_regs_dcn3(id)\
[id] = {\
DWBC_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_dwbc_registers dwbc30_regs[] = {
dwbc_regs_dcn3(0),
};
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define mcif_wb_regs_dcn3(id)\
[id] = {\
MCIF_WB_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
mcif_wb_regs_dcn3(0)
};
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN20(0),
dsc_regsDCN20(1),
dsc_regsDCN20(2)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN3_0(0),
MPC_REG_LIST_DCN3_0(1),
MPC_REG_LIST_DCN3_0(2),
MPC_REG_LIST_DCN3_0(3),
MPC_OUT_MUX_REG_LIST_DCN3_0(0),
MPC_OUT_MUX_REG_LIST_DCN3_0(1),
MPC_OUT_MUX_REG_LIST_DCN3_0(2),
MPC_OUT_MUX_REG_LIST_DCN3_0(3),
MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
MPC_RMU_REG_LIST_DCN3AG(0),
MPC_RMU_REG_LIST_DCN3AG(1),
//MPC_RMU_REG_LIST_DCN3AG(2),
MPC_DWB_MUX_REG_LIST_DCN3_0(0),
};
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define optc_regs(id)\
[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)}
static const struct dcn_optc_registers optc_regs[] = {
optc_regs(0),
optc_regs(1),
optc_regs(2),
optc_regs(3)
};
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT)
};
static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN30(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN31(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN31()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN31(_MASK)
};
#define SRII2(reg_name_pre, reg_name_post, id)\
.reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
## id ## _ ## reg_name_post ## _BASE_IDX) + \
reg ## reg_name_pre ## id ## _ ## reg_name_post
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
SR(MMHUBBUB_MEM_PWR_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SRII(PIXEL_RATE_CNTL, OTG, 0), \
SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \
SR(RBBMIF_TIMEOUT_DIS), \
SR(RBBMIF_TIMEOUT_DIS_2), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
SR(MPC_CRC_CTRL), \
SR(MPC_CRC_RESULT_GB), \
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(DOMAIN0_PG_CONFIG), \
SR(DOMAIN1_PG_CONFIG), \
SR(DOMAIN2_PG_CONFIG), \
SR(DOMAIN3_PG_CONFIG), \
SR(DOMAIN16_PG_CONFIG), \
SR(DOMAIN17_PG_CONFIG), \
SR(DOMAIN18_PG_CONFIG), \
SR(DOMAIN0_PG_STATUS), \
SR(DOMAIN1_PG_STATUS), \
SR(DOMAIN2_PG_STATUS), \
SR(DOMAIN3_PG_STATUS), \
SR(DOMAIN16_PG_STATUS), \
SR(DOMAIN17_PG_STATUS), \
SR(DOMAIN18_PG_STATUS), \
SR(D1VGA_CONTROL), \
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_HW_CONTROL)
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN31_REG_LIST()
};
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN31_MASK_SH_LIST(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
static const struct resource_caps res_cap_dcn31 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 5,
.num_stream_encoder = 5,
.num_dig_link_enc = 5,
.num_hpo_dp_stream_encoder = 4,
.num_hpo_dp_link_encoder = 2,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 16000
},
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
.argb8888 = 167,
.nv12 = 167,
.fp16 = 167
},
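/* positional initializers, presumably min_width and min_height */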
64,
64
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true, /* HW does not support it */
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.pstate_enabled = true,
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
.vga = true,
.i2c = true,
.dmcu = false, // previously known to cause hangs on S3 cycles if enabled
.dscl = true,
.cm = true,
.mpc = true,
.optc = true,
.vpg = true,
.afmt = true,
}
},
.enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
.disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},
};
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn31_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
if (dpp3_construct(dpp, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
static struct output_pixel_processor *dcn31_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn31_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn31_mpc_create(
struct dc_context *ctx,
int num_mpcc,
int num_rmu)
{
struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
GFP_KERNEL);
if (!mpc30)
return NULL;
dcn30_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
num_mpcc,
num_rmu);
return &mpc30->base;
}
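/* Create the DCN3.16 HUBBUB with the IP-specific DET, pixel chunk and CRB
 * sizes, and hook up the per-VMID register contexts.
 */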
static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
GFP_KERNEL);
if (!hubbub3)
return NULL;
hubbub31_construct(hubbub3, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask,
dcn3_16_ip.det_buffer_size_kbytes,
dcn3_16_ip.pixel_chunk_size_kbytes,
dcn3_16_ip.config_return_buffer_size_in_kbytes);
for (i = 0; i < res_cap_dcn31.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub3->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
return &hubbub3->base;
}
static struct timing_generator *dcn31_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &optc_regs[instance];
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
dcn31_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc20->enc10.base;
}
/* Create a minimal link encoder object not associated with a particular
* physical connector.
* resource_funcs.link_enc_create_minimal
*/
static struct link_encoder *dcn31_link_enc_create_minimal(
struct dc_context *ctx, enum engine_id eng_id)
{
struct dcn20_link_encoder *enc20;
if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
return NULL;
enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct_minimal(
enc20,
ctx,
&link_enc_feature,
&link_enc_regs[eng_id - ENGINE_ID_DIGA],
eng_id);
return &enc20->enc10.base;
}
static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn31_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dcn31_panel_cntl_construct(panel_cntl, init_data);
return &panel_cntl->base;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn31_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct vpg *dcn31_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
if (!vpg31)
return NULL;
vpg31_construct(vpg31, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
return &vpg31->base;
}
static struct afmt *dcn31_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
if (!afmt31)
return NULL;
afmt31_construct(afmt31, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
// Light sleep by default, no need to power down here
return &afmt31->base;
}
static struct apg *dcn31_apg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
if (!apg31)
return NULL;
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
&apg_mask);
return &apg31->base;
}
static struct stream_encoder *dcn316_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
struct afmt *afmt;
int vpg_inst;
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
if (eng_id <= ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
return NULL;
enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
if (!enc1 || !vpg || !afmt) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
return NULL;
}
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
struct vpg *vpg;
struct apg *apg;
uint32_t hpo_dp_inst;
uint32_t vpg_inst;
uint32_t apg_inst;
ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
/* Mapping of VPG register blocks to HPO DP block instance:
* VPG[6] -> HPO_DP[0]
* VPG[7] -> HPO_DP[1]
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
vpg_inst = hpo_dp_inst + 6;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
* APG[1] -> HPO_DP[1]
* APG[2] -> HPO_DP[2]
* APG[3] -> HPO_DP[3]
*/
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG/APG sub-blocks */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
if (!hpo_dp_enc31 || !vpg || !apg) {
kfree(hpo_dp_enc31);
kfree(vpg);
kfree(apg);
return NULL;
}
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
&hpo_dp_se_shift, &hpo_dp_se_mask);
return &hpo_dp_enc31->base;
}
static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
uint8_t inst,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
return &hpo_dp_enc31->base;
}
static struct dce_hwseq *dcn31_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn31_create_audio,
.create_stream_encoder = dcn316_stream_encoder_create,
.create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
.create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
static void dcn316_resource_destruct(struct dcn316_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
if (pool->base.stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
pool->base.stream_enc[i]->vpg = NULL;
}
if (pool->base.stream_enc[i]->afmt != NULL) {
kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
pool->base.stream_enc[i]->afmt = NULL;
}
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
if (pool->base.hpo_dp_stream_enc[i] != NULL) {
if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
}
if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
pool->base.hpo_dp_stream_enc[i]->apg = NULL;
}
kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
pool->base.hpo_dp_stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
if (pool->base.hpo_dp_link_enc[i] != NULL) {
kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
pool->base.hpo_dp_link_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn31_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
if (pool->base.mpc_lut[i] != NULL) {
dc_3dlut_func_release(pool->base.mpc_lut[i]);
pool->base.mpc_lut[i] = NULL;
}
if (pool->base.mpc_shaper[i] != NULL) {
dc_transfer_func_release(pool->base.mpc_shaper[i]);
pool->base.mpc_shaper[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.multiple_abms[i] != NULL)
dce_abm_destroy(&pool->base.multiple_abms[i]);
}
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
static struct hubp *dcn31_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
if (hubp31_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
BREAK_TO_DEBUGGER();
kfree(hubp2);
return NULL;
}
static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
GFP_KERNEL);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
return false;
}
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
&dwbc30_mask,
i);
pool->dwbc[i] = &dwbc30->base;
}
return true;
}
static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
GFP_KERNEL);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
return false;
}
dcn30_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
&mcif_wb30_mask,
i);
pool->mcif_wb[i] = &mcif_wb30->base;
}
return true;
}
static struct display_stream_compressor *dcn31_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
static void dcn316_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn316_resource_pool *dcn31_pool = TO_DCN316_RES_POOL(*pool);
dcn316_resource_destruct(dcn31_pool);
kfree(dcn31_pool);
*pool = NULL;
}
static struct clock_source *dcn31_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
static int dcn316_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
* intermittently experienced depending on peak b/w requirements.
*/
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
pipes[pipe_cnt].dout.dsc_input_bpc = 8;
break;
case COLOR_DEPTH_101010:
pipes[pipe_cnt].dout.dsc_input_bpc = 10;
break;
case COLOR_DEPTH_121212:
pipes[pipe_cnt].dout.dsc_input_bpc = 12;
break;
default:
ASSERT(0);
break;
}
}
pipe_cnt++;
}
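/* Split the usable DET evenly across active pipes in CRB segments and clamp
 * to the per-pipe maximum. As a purely illustrative example, with a
 * hypothetical 1024 KB of usable DET and 4 pipes:
 * (1024 / 64 / 4) * 64 = 256 KB per pipe, below the DCN3_16_MAX_DET_SIZE cap
 * of 384 KB.
 */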
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_16_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE;
ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE);
dc->config.enable_4to1MPC = false;
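/*
 * Single-pipe tuning (unless z9 MPC is disabled via debug): a dual-plane
 * (video) surface at 1080p or smaller enables 4:1 MPC combine with the DET
 * sized for four pipes, while a single non-video plane gets a fixed 192 KB
 * DET with unbounded requests.
 */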
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format)) {
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
}
}
return pipe_cnt;
}
static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static struct resource_funcs dcn316_res_pool_funcs = {
.destroy = dcn316_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
.link_enc_create_minimal = dcn31_link_enc_create_minimal,
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn316_populate_dml_pipes_from_context,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn316_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn31;
pool->base.funcs = &dcn316_res_pool_funcs;
/*************************************************
* Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 2;
dc->caps.max_slave_yuv_planes = 2;
dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
dc->caps.force_dp_tps4_for_cp2520 = false;
dc->caps.dp_hpo = true;
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
dc->caps.color.dpp.dgam_rom_caps.pq = 1;
dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
dc->caps.color.dpp.post_csc = 1;
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN316
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 1;
dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
uint8_t is_vbios_lttpr_enable = 0;
bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
}
/* interop bit is implicit */
{
dc->caps.vbios_lttpr_aware = true;
}
}
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
/* TODO: do not reuse phy_pll registers */
pool->base.dp_clock_source =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* TODO: DCCG */
pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* TODO: IRQ */
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn31_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
/* HUBBUB */
pool->base.hubbub = dcn31_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
/* HUBPs, DPPs, OPPs and TGs */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn31_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create hubps!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn31_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn31_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn31_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
/* PSR */
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
dm_error("DC: failed to create psr obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
&abm_regs[i],
&abm_shift,
&abm_mask);
if (pool->base.multiple_abms[i] == NULL) {
dm_error("DC: failed to create abm for pipe %d!\n", i);
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* MPC and DSC */
pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn31_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
/* DWB and MMHUBBUB */
if (!dcn31_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn31_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
/* AUX and I2C */
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
dcn31_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp;
return true;
create_fail:
dcn316_resource_destruct(pool);
return false;
}
struct resource_pool *dcn316_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn316_resource_pool *pool =
kzalloc(sizeof(struct dcn316_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn316_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dal_asic_id.h"
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "link.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce120/dce120_clk_mgr.h"
#include "dce60/dce60_clk_mgr.h"
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
#include "dcn201/dcn201_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dcn314/dcn314_clk_mgr.h"
#include "dcn315/dcn315_clk_mgr.h"
#include "dcn316/dcn316_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
/* Don't count SubVP phantom pipes as part of active
* display count
*/
if (stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/*
* Only notify active stream or virtual stream.
* Need to notify virtual stream to work around
* headless case. HPD does not fire when system is in
* S0i2.
*/
if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
display_count++;
}
return display_count;
}
int clk_mgr_helper_get_active_plane_cnt(
struct dc *dc,
struct dc_state *context)
{
int i, total_plane_count;
total_plane_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_status stream_status = context->stream_status[i];
/*
* Sum up plane_count for all streams ( active and virtual ).
*/
total_plane_count += stream_status.plane_count;
}
return total_plane_count;
}
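/*
 * Cache the current PSR allow state of every PSR-capable eDP panel and force
 * PSR/Replay inactive before leaving the optimized power state;
 * clk_mgr_optimize_pwr_state() below restores the cached value.
 */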
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
int edp_num;
unsigned int panel_inst;
dc_get_edp_links(dc, edp_links, &edp_num);
if (dc->hwss.exit_optimized_pwr_state)
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
bool allow_active = false;
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
dc->link_srv->edp_set_replay_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
}
void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
int edp_num;
unsigned int panel_inst;
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
dc->link_srv->edp_set_replay_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
if (dc->hwss.optimize_pwr_state)
dc->hwss.optimize_pwr_state(dc, dc->current_state);
}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
struct hw_asic_id asic_id = ctx->asic_id;
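/*
 * Pick the clk_mgr constructor from the ASIC family and, where needed, the
 * hardware revision. The DCN-based families (FAMILY_RV and later) are wrapped
 * in CONFIG_DRM_AMD_DC_FP, presumably because their clock managers rely on the
 * kernel floating-point support used elsewhere in DC.
 */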
switch (asic_id.chip_family) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case FAMILY_SI: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
case FAMILY_CI:
case FAMILY_KV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
case FAMILY_CZ: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dce110_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
case FAMILY_VI: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
return &clk_mgr->base;
}
case FAMILY_AI: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
dce121_clk_mgr_construct(ctx, clk_mgr);
else
dce120_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#if defined(CONFIG_DRM_AMD_DC_FP)
case FAMILY_RV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
return &clk_mgr->base;
}
if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
return &clk_mgr->base;
}
return &clk_mgr->base;
}
case FAMILY_NV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
case FAMILY_VGH:
if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
case FAMILY_YELLOW_CARP: {
struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
case AMDGPU_FAMILY_GC_10_3_6: {
struct clk_mgr_dcn315 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn315_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
case AMDGPU_FAMILY_GC_10_3_7: {
struct clk_mgr_dcn316 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn316_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
case AMDGPU_FAMILY_GC_11_0_0: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
break;
}
case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn314_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
#endif /* CONFIG_DRM_AMD_DC_FP - Family RV and later */
default:
ASSERT(0); /* Unknown Asic */
break;
}
return NULL;
}
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
#ifdef CONFIG_DRM_AMD_DC_FP
switch (clk_mgr_base->ctx->asic_id.chip_family) {
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
} else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
break;
case FAMILY_VGH:
if (ASICREV_IS_VANGOGH(clk_mgr_base->ctx->asic_id.hw_internal_rev))
vg_clk_mgr_destroy(clk_mgr);
break;
case FAMILY_YELLOW_CARP:
dcn31_clk_mgr_destroy(clk_mgr);
break;
case AMDGPU_FAMILY_GC_10_3_6:
dcn315_clk_mgr_destroy(clk_mgr);
break;
case AMDGPU_FAMILY_GC_10_3_7:
dcn316_clk_mgr_destroy(clk_mgr);
break;
case AMDGPU_FAMILY_GC_11_0_0:
dcn32_clk_mgr_destroy(clk_mgr);
break;
case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
default:
break;
}
#endif /* CONFIG_DRM_AMD_DC_FP */
kfree(clk_mgr);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce/dce_11_2_d.h"
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112_clk_mgr.h"
#include "dal_asic_id.h"
/* set register offset */
#define SR(reg_name)\
.reg_name = mm ## reg_name
/* set register offset with instance */
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
//TODO: remove this and use the two broken-down functions below
int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
struct dc *dc = clk_mgr_base->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/* Make sure requested clock isn't lower than minimum threshold*/
requested_clk_khz = max(requested_clk_khz,
clk_mgr_dce->base.dentist_vco_freq_khz / 62);
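/*
 * Note on the /62 floor above: with a hypothetical DENTIST VCO of
 * 3,600,000 kHz this raises any request below about 58,064 kHz
 * (3,600,000 / 62) up to that minimum.
 */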
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
actual_clock = dce_clk_params.target_clock_frequency;
/*
* From power down (HWReset) we need to mark the clock state as ClocksStateNominal,
* so that on resume we will call the pplib voltage regulator.
*/
if (requested_clk_khz == 0)
clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
/*Program DP ref Clock*/
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
if (!((clk_mgr_base->ctx->asic_id.chip_family == FAMILY_AI) &&
ASICREV_IS_VEGA20_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)))
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
else
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
}
clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
return actual_clock;
}
int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
struct dc *dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
clk_mgr->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
actual_clock = dce_clk_params.target_clock_frequency;
/*
* From power down (HWReset) we need to mark the clock state as ClocksStateNominal,
* so that on resume we will call the pplib voltage regulator.
*/
if (requested_clk_khz == 0)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
}
clk_mgr->dfs_bypass_disp_clk = actual_clock;
return actual_clock;
}
int dce112_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/*Program DP ref Clock*/
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
if (!((clk_mgr->base.ctx->asic_id.chip_family == FAMILY_AI) &&
ASICREV_IS_VEGA20_P(clk_mgr->base.ctx->asic_id.hw_internal_rev)))
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
else
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
/* Returns the dp_refclk that was set */
return dce_clk_params.target_clock_frequency;
}
static void dce112_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce112_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce112_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce112_update_clocks
};
void dce112_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce112_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"
// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn31_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_smu.h"
#include "dm_helpers.h"
/* TODO: remove this include once we ported over remaining clk mgr functions*/
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
#include "link.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
#include "yellow_carp_offset.h"
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
static int dcn31_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
/* Checking stream / link detection ensuring that PHY is active*/
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
display_count++;
}
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
/* WA for hang on HDMI after display is turned off and back on */
if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
}
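/*
 * Workaround applied around dispclk changes: for head pipes whose stream is
 * powered off or carries a virtual signal, immediately disable the CRTC when
 * disable == true and re-enable it afterwards (see the disable/enable pair in
 * dcn31_update_clocks()).
 */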
static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
}
void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
union dmub_rb_cmd cmd;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
if (dc->work_arounds.skip_clock_update)
return;
/*
* If it is safe to lower but we are already in the lower state, we don't have to do anything.
* If safe_to_lower is false, we just go to the higher state.
*/
if (safe_to_lower) {
if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
dcn31_smu_set_dtbclk(clk_mgr, false);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn31_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
idle_info.idle_info.s0i2_rdy = 1;
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
} else {
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
dcn31_smu_set_dtbclk(clk_mgr, true);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
}
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn31_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn31_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
// workaround: limit dppclk to no less than 100 MHz to avoid underflow when a lower eDP panel switches to eDP plus a 4K monitor
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
// notify DMCUB of latest clocks
memset(&cmd, 0, sizeof(cmd));
cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
clk_mgr_base->clks.dcfclk_deep_sleep_khz;
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
/* get FbMult value */
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
/*
* The register value of FbMult is in 8.16 format; we convert it to 31.32
* to leverage the fixed-point operations available in the driver.
*/
REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
pll_req = dc_fixpt_from_int(fbmult_int_val);
/*
* Since the fractional part is only 16 bits in the register definition but 32 bits
* in our fixed-point definition, we need to shift left by 16 to obtain the correct value.
*/
pll_req.value |= fbmult_frac_val << 16;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
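/*
 * Worked example (hypothetical register values): FbMult_int = 100 and
 * FbMult_frac = 0x8000 (0.5) with dfs_ref_freq_khz = 48000 give a VCO of
 * 100.5 * 48000 = 4,824,000 kHz.
 */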
/* integer part is now VCO frequency in kHz */
return dc_fixpt_floor(pll_req);
}
static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
dcn31_smu_enable_pme_wa(clk_mgr);
}
void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
bool dcn31_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
else if (a->zstate_support != b->zstate_support)
return false;
else if (a->dtbclk_en != b->dtbclk_en)
return false;
return true;
}
static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
return;
}
static struct clk_bw_params dcn31_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
.num_entries = 4,
},
};
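/*
 * dcn31_bw_params.wm_table is filled in at construct time: lpddr5_wm_table is
 * used when the VBIOS integrated info reports LPDDR5 memory, ddr5_wm_table
 * otherwise (see dcn31_clk_mgr_construct()).
 */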
static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.valid = true,
},
}
};
static struct wm_table lpddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
};
static DpmClocks_t dummy_clocks;
static struct dcn31_watermarks dummy_wms = { 0 };
static void dcn31_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn31_watermarks *table)
{
int i, num_valid_sets;
num_valid_sets = 0;
for (i = 0; i < WM_SET_COUNT; i++) {
/* skip empty entries, the smu array has no holes*/
if (!bw_params->wm_table.entries[i].valid)
continue;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on fclk, so leave it as unconstrained */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
if (i == 0)
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
/* Modify previous watermark range to cover up to max */
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
}
num_valid_sets++;
}
ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
/* modify the min and max to make sure we cover the whole range*/
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
/* This is for writeback only; it does not matter currently as there is no writeback support */
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}
static void dcn31_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn31 *clk_mgr_dcn31 = TO_CLK_MGR_DCN31(clk_mgr);
struct dcn31_watermarks *table = clk_mgr_dcn31->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
if (!table || clk_mgr_dcn31->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
dcn31_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn31_smu_set_dram_addr_high(clk_mgr,
clk_mgr_dcn31->smu_wm_set.mc_address.high_part);
dcn31_smu_set_dram_addr_low(clk_mgr,
clk_mgr_dcn31->smu_wm_set.mc_address.low_part);
dcn31_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
static void dcn31_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn31_smu_dpm_clks *smu_dpm_clks)
{
DpmClocks_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
dcn31_smu_set_dram_addr_high(clk_mgr,
smu_dpm_clks->mc_address.high_part);
dcn31_smu_set_dram_addr_low(clk_mgr,
smu_dpm_clks->mc_address.low_part);
dcn31_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
int i;
for (i = 0; i < num_clocks; ++i) {
if (clocks[i] > max)
max = clocks[i];
}
return max;
}
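/*
 * Return the clock that matches the requested SoC voltage exactly; if there is
 * no exact match, fall back to the clock at the highest voltage below the
 * request.
 */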
static unsigned int find_clk_for_voltage(
const DpmClocks_t *clock_table,
const uint32_t clocks[],
unsigned int voltage)
{
int i;
int max_voltage = 0;
int clock = 0;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
} else if (clock_table->SocVoltage[i] >= max_voltage &&
clock_table->SocVoltage[i] < voltage) {
max_voltage = clock_table->SocVoltage[i];
clock = clocks[i];
}
}
ASSERT(clock);
return clock;
}
static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
const DpmClocks_t *clock_table)
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
if (clock_table->DfPstateTable[i].FClk != 0) {
j = i;
break;
}
}
if (j == -1) {
/* clock table is all 0s, just use our own hardcoded values */
ASSERT(0);
return;
}
bw_params->clk_table.num_entries = j + 1;
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
ASSERT(0);
}
for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
switch (clock_table->DfPstateTable[j].WckRatio) {
case WCK_RATIO_1_2:
bw_params->clk_table.entries[i].wck_ratio = 2;
break;
case WCK_RATIO_1_4:
bw_params->clk_table.entries[i].wck_ratio = 4;
break;
default:
bw_params->clk_table.entries[i].wck_ratio = 1;
}
bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->vram_type = bios_info->memory_type;
bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
//bw_params->dram_channel_width_bytes = dc->ctx->asic_id.vram_width;
bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
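/*
 * One watermark set per valid clock-table entry; any remaining sets are marked
 * invalid so dcn31_build_watermark_ranges() skips them.
 */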
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
bw_params->wm_table.entries[i].valid = true;
}
}
static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
int display_count;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn31_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
idle_info.idle_info.s0i2_rdy = 1;
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
}
int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
return clk_mgr_base->clks.ref_dtbclk_khz;
}
static struct clk_mgr_funcs dcn31_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn31_update_clocks,
.init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn31_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn31_notify_wm_ranges,
.set_low_power_state = dcn31_set_low_power_state
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
void dcn31_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn31 *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 };
struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn31_funcs;
clk_mgr->base.pp_smu = pp_smu;
clk_mgr->base.dccg = dccg;
clk_mgr->base.dfs_bypass_disp_clk = 0;
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct dcn31_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(DpmClocks_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
ASSERT(smu_dpm_clks.dpm_clks);
clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver)
clk_mgr->base.smu_present = true;
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn31_bw_params.wm_table = lpddr5_wm_table;
} else {
dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/* If the BIOS enabled SS, the driver needs to adjust the DTB clock; only enable this with a correct BIOS */
//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn31_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
int i;
dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
"VcnClkLevelsEnabled: %d\n"
"NumDfPst atesEnabled: %d\n"
"MinGfxClk: %d\n"
"MaxGfxClk: %d\n",
smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
smu_dpm_clks.dpm_clks->MinGfxClk,
smu_dpm_clks.dpm_clks->MaxGfxClk);
for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
i,
smu_dpm_clks.dpm_clks->DcfClocks[i]);
}
for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
i, smu_dpm_clks.dpm_clks->DispClocks[i]);
}
for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
i, smu_dpm_clks.dpm_clks->SocClocks[i]);
}
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
"smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
"smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
}
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn31_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
smu_dpm_clks.dpm_clks);
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
smu_dpm_clks.dpm_clks);
}
void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
struct clk_mgr_dcn31 *clk_mgr = TO_CLK_MGR_DCN31(clk_mgr_int);
if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
clk_mgr->smu_wm_set.wm_set);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn31_smu.h"
#include "yellow_carp_offset.h"
#include "mp/mp_13_0_2_offset.h"
#include "mp/mp_13_0_2_sh_mask.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
#define VBIOSSMC_MSG_TestMessage 0x1
#define VBIOSSMC_MSG_GetSmuVersion 0x2
#define VBIOSSMC_MSG_PowerUpGfx 0x3
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5 //Not used. DPRef is constant
#define VBIOSSMC_MSG_SetDppclkFreq 0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x8
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x9 //Keep it in case VMIN does not support phy clk
#define VBIOSSMC_MSG_GetFclkFrequency 0xA
#define VBIOSSMC_MSG_SetDisplayCount 0xB //Not used anymore
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC //Not used anymore
#define VBIOSSMC_MSG_UpdatePmeRestore 0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0xE //Used for WM table txfr
#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12
#define VBIOSSMC_MSG_GetDprefclkFreq 0x13
#define VBIOSSMC_MSG_GetDtbclkFreq 0x14
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
#define VBIOSSMC_Message_Count 0x18
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
#define VBIOSSMC_Result_Failed 0xFF
#define VBIOSSMC_Result_UnknownCmd 0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_91);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
unsigned int msg_id,
unsigned int param)
{
uint32_t result;
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
if (result == VBIOSSMC_Status_BUSY) {
return -1;
}
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Result_Failed) {
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
DC_LOG_DEBUG("Watermarks table not configured properly by SMU");
else
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
return -1;
}
if (IS_SMU_TIMEOUT(result)) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
}
return REG_READ(MP1_SMN_C2PMSG_83);
}
int dcn31_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
return dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetSmuVersion,
0);
}
int dcn31_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dispclk_khz;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
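/*
* Illustrative sketch (added for clarity, not in the original source): the SMU
* interface works in MHz while DC works in kHz, so a request goes through a
* round-up conversion and back. Assuming khz_to_mhz_ceil() rounds up:
*
*   requested_dispclk_khz = 98765
*   khz_to_mhz_ceil(98765) = 99        -> value sent to the SMU
*   SMU grants 99 MHz
*   return value = 99 * 1000 = 99000   -> kHz, never below the request
*/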
int dcn31_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return clk_mgr->base.dprefclk_khz;
actual_dprefclk_set_mhz = dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
/* TODO: add code for programming the DP DTO; currently this is done by the command table */
return actual_dprefclk_set_mhz * 1000;
}
int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_dcfclk_khz;
actual_dcfclk_set_mhz = dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}
int dcn31_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
int actual_min_ds_dcfclk_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_min_ds_dcfclk_khz;
actual_min_ds_dcfclk_mhz = dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
return actual_min_ds_dcfclk_mhz * 1000;
}
int dcn31_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dpp_khz;
actual_dppclk_set_mhz = dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
void dcn31_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return;
if (!clk_mgr->smu_present)
return;
//TODO: Work with smu team to define optimization options.
dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
}
void dcn31_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
union display_idle_optimization_u idle_info = { 0 };
if (!clk_mgr->smu_present)
return;
if (enable) {
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
}
dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
}
void dcn31_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
}
void dcn31_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}
void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}
void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}
void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}
void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
unsigned int msg_id, param;
if (!clk_mgr->smu_present)
return;
if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
support = DCN_ZSTATE_SUPPORT_DISALLOW;
if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY ||
support == DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY)
param = 1;
else
param = 0;
if (support == DCN_ZSTATE_SUPPORT_DISALLOW)
msg_id = VBIOSSMC_MSG_DisallowZstatesEntry;
else
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
dcn31_smu_send_msg_with_param(
clk_mgr,
msg_id,
param);
}
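/*
* Summary of the mapping implemented above (descriptive comment added for
* clarity, not in the original source):
*
*   DCN_ZSTATE_SUPPORT_DISALLOW          -> DisallowZstatesEntry, param 0
*   DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY    -> AllowZstatesEntry,    param 1
*   DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY -> AllowZstatesEntry,    param 1
*   any other "allow" state              -> AllowZstatesEntry,    param 0
*
* ALLOW_Z10_ONLY is first demoted to DISALLOW when the z9 disable interface
* is not enabled in the debug options.
*/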
/* Arg = 1: turn DTB clock on; 0: turn DTB clock off. When on, it runs at 600 MHz */
void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
if (!clk_mgr->smu_present)
return;
dcn31_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dcn30_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "dcn30_smu11_driver_if.h"
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
#define REG(reg) \
(clk_mgr->regs->reg)
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#undef CLK_SRI
#define CLK_SRI(reg_name, block, inst)\
.reg_name = mm ## block ## _ ## reg_name
static const struct clk_mgr_registers clk_mgr_regs = {
CLK_REG_LIST_DCN3()
};
static const struct clk_mgr_shift clk_mgr_shift = {
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask = {
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(_MASK)
};
/* Query SMU for all clock states for a particular clock */
static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, uint32_t clk, unsigned int *entry_0, unsigned int *num_levels)
{
unsigned int i;
char *entry_i = (char *)entry_0;
uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
*num_levels = 2;
else
/* discrete, a number of fixed states */
/* will set num_levels to 0 on failure */
*num_levels = ret & 0xFF;
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels; i++) {
*((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
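/*
* Descriptive note (added for clarity, not in the original source): entry_0
* points at a single field (e.g. dcfclk_mhz) of clk_table.entries[0], and the
* loop above advances entry_i by sizeof(entries[0]) per iteration, so each
* DPM level is written into the same field of the next table entry.
*/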
static void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
DC_FP_START();
dcn3_fpu_build_wm_range_table(&clk_mgr->base);
DC_FP_END();
}
void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr->smu_present = false;
if (!clk_mgr_base->bw_params)
return;
if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
// do we fail if these fail? if so, how? do we not care to check?
dcn30_smu_check_driver_if_version(clk_mgr);
dcn30_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_levels);
dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, 0);
/* DTBCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
/* SOCCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_levels);
// DPREFCLK ???
/* DISPCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_levels);
/* DPPCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_PIXCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
&num_levels);
/* PHYCLK */
dcn3_init_single_clock(clk_mgr, PPCLK_PHYCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz,
&num_levels);
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
/* WM range table */
DC_FP_START();
dcn3_build_wm_range_table(clk_mgr);
DC_FP_END();
}
static int dcn30_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
/* get FbMult value */
struct fixed31_32 pll_req;
/* get FbMult value */
uint32_t pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);
/* set up a fixed-point number
* this works because the int part is on the right edge of the register
* and the frac part is on the left edge
*/
pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
return dc_fixpt_floor(pll_req);
}
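/*
* Worked example (illustrative, not in the original source), using the
* dfs_ref_freq_khz of 100000 (100 MHz) set at construct time: a feedback
* multiplier of 36.5 read from CLK0_CLK_PLL_REQ gives
*   36.5 * 100000 kHz = 3650000 kHz,
* which matches the 3.65 GHz fallback used when the register read yields zero.
*/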
static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
bool dpp_clock_lowered = false;
bool update_pstate_unsupported_clk = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
(dc->debug.force_clock_mode & 0x1)) {
/* This happens on resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but still need to set them for S3. */
force_reset = true;
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
/* force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in Passive level. */
}
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
/* We don't actually care about socclk, don't notify SMU of hard min */
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
p_state_change_support = new_clocks->p_state_change_support;
// invalidate the current P-State forced min in certain dc_mode_softmax situations
if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) !=
(clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000))
update_pstate_unsupported_clk = true;
}
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) ||
update_pstate_unsupported_clk) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support) {
if (dc->clk_mgr->dc_mode_softmax_enabled &&
new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
else
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
update_uclk = true;
}
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
update_dispclk = true;
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.*/
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
/*update dmcu for wait_loop count*/
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}
static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
unsigned int i;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
if (!clk_mgr->smu_present)
return;
if (!table)
// should log failure
return;
memset(table, 0, sizeof(*table));
/* collect valid ranges, place in pmfw table */
for (i = 0; i < WM_SET_COUNT; i++)
if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
if (current_mode) {
if (clk_mgr_base->clks.p_state_change_support)
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
} else {
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
}
}
/* Set max memclk to highest DPM value */
static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}
static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}
static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
/* Refresh memclk states */
dcn3_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
/* Refresh bounding box */
DC_FP_START();
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
DC_FP_END();
}
static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return clk_mgr->smu_present;
}
static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
else if (a->dramclk_khz != b->dramclk_khz)
return false;
else if (a->p_state_change_support != b->p_state_change_support)
return false;
return true;
}
static void dcn3_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_pme_workaround(clk_mgr);
}
/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int i, max_phyclk_req = clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz * 1000;
if (!clk_mgr->smu_present)
return;
/* TODO - DP2.0 HW: calculate the 128b/132b link rate in the clock manager with the new formula */
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
for (i = 0; i < MAX_PIPES * 2; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
}
}
static struct clk_mgr_funcs dcn3_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn3_update_clocks,
.init_clocks = dcn3_init_clocks,
.notify_wm_ranges = dcn3_notify_wm_ranges,
.set_hard_min_memclk = dcn3_set_hard_min_memclk,
.set_hard_max_memclk = dcn3_set_hard_max_memclk,
.set_max_memclk = dcn3_set_max_memclk,
.set_min_memclk = dcn3_set_min_memclk,
.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
.is_smu_present = dcn3_is_smu_present
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
dcn2_init_clocks(clk_mgr);
/* TODO: Implement the functions and remove the ifndef guard */
}
struct clk_mgr_funcs dcn3_fpga_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks_fpga,
.init_clocks = dcn3_init_clocks_fpga,
};
/* TODO for dcn30: clk register offsets */
void dcn3_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct clk_state_registers_and_bypass s = { 0 };
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn3_funcs;
clk_mgr->regs = &clk_mgr_regs;
clk_mgr->clk_mgr_shift = &clk_mgr_shift;
clk_mgr->clk_mgr_mask = &clk_mgr_mask;
clk_mgr->dccg = dccg;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->dfs_ref_freq_khz = 100000;
clk_mgr->base.dprefclk_khz = 730000; // 730 MHz default (700 MHz planned if VCO is 3.85 GHz), will be retrieved
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3650000;
/* Convert dprefclk units from MHz to KHz */
/* Value already divided by 10, some resolution lost */
/*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
//ASSERT(s.dprefclk != 0);
if (s.dprefclk != 0)
clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
clk_mgr->dfs_bypass_enabled = false;
clk_mgr->smu_present = false;
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
}
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
clk_mgr->wm_range_table);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dcn30_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dalsmc.h"
#include "dcn30_smu11_driver_if.h"
#define mmDAL_MSG_REG 0x1628A
#define mmDAL_ARG_REG 0x16273
#define mmDAL_RESP_REG 0x16274
#define REG(reg_name) \
mm ## reg_name
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t reg = 0;
do {
reg = REG_READ(DAL_RESP_REG);
if (reg)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
/* handle DALSMC_Result_CmdRejectedBusy? */
/* Log? */
return reg;
}
static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
{
uint32_t result;
/* Wait for response register to be ready */
dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
/* Set the parameter register for the SMU message */
REG_WRITE(DAL_ARG_REG, param_in);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
if (IS_SMU_TIMEOUT(result)) {
dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
}
/* Wait for response */
if (result == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
return true;
}
return false;
}
/* Test message should return input + 1 */
bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input)
{
uint32_t response = 0;
smu_print("SMU Test message: %d\n", input);
if (dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_TestMessage, input, &response))
if (response == input + 1)
return true;
return false;
}
bool dcn30_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
{
smu_print("SMU Get SMU version\n");
if (dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_GetSmuVersion, 0, version)) {
smu_print("SMU version: %d\n", *version);
return true;
}
return false;
}
/* Message output should match SMU11_DRIVER_IF_VERSION in smu11_driver_if.h */
bool dcn30_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
{
uint32_t response = 0;
smu_print("SMU Check driver if version\n");
if (dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
smu_print("SMU driver if version: %d\n", response);
if (response == SMU11_DRIVER_IF_VERSION)
return true;
}
return false;
}
/* Message output should match DALSMC_VERSION in dalsmc.h */
bool dcn30_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
{
uint32_t response = 0;
smu_print("SMU Check msg header version\n");
if (dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
smu_print("SMU msg header version: %d\n", response);
if (response == DALSMC_VERSION)
return true;
}
return false;
}
void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
smu_print("SMU Set DRAM addr high: %d\n", addr_high);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
}
void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
smu_print("SMU Set DRAM addr low: %d\n", addr_low);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
}
void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table SMU 2 DRAM\n");
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_TransferTableSmu2Dram, TABLE_WATERMARKS, NULL);
}
void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL);
}
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
/* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
uint32_t param = (clk << 16) | freq_mhz;
smu_print("SMU Set hard min by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetHardMinByFreq, param, &response);
smu_print("SMU Frequency set = %d MHz\n", response);
return response;
}
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
/* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
uint32_t param = (clk << 16) | freq_mhz;
smu_print("SMU Set hard max by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetHardMaxByFreq, param, &response);
smu_print("SMU Frequency set = %d MHz\n", response);
return response;
}
/*
* Frequency in MHz returned in lower 16 bits for valid DPM level
*
* Call with dpm_level = 0xFF to query features, return value will be:
* Bits 7:0 - number of DPM levels
* Bit 28 - 1 = auto DPM on
* Bit 29 - 1 = sweep DPM on
* Bit 30 - 1 = forced DPM on
* Bit 31 - 0 = discrete, 1 = fine-grained
*
* With fine-grained DPM, only min and max frequencies will be reported
*
* Returns 0 on failure
*/
unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
{
uint32_t response = 0;
/* bits 23:16 for clock type, lower 8 bits for DPM level */
uint32_t param = (clk << 16) | dpm_level;
smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_GetDpmFreqByIndex, param, &response);
smu_print("SMU dpm freq: %d MHz\n", response);
return response;
}
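/*
* Illustrative decode of the dpm_level = 0xFF feature query documented above.
* This sketch is added for clarity and is not part of the original driver;
* the helper name is an assumption.
*/
static inline unsigned int dcn30_smu_example_decode_dpm_query(uint32_t query_ret, bool *fine_grained)
{
/* bit 31: 0 = discrete DPM, 1 = fine-grained (only min/max reported) */
*fine_grained = (query_ret & (1u << 31)) != 0;
/* bits 7:0: number of discrete DPM levels (0 if the query failed) */
return query_ret & 0xFF;
}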
/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
{
uint32_t response = 0;
/* bits 23:16 for clock type */
uint32_t param = clk << 16;
smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
smu_print("SMU DC mode max DMP freq: %d MHz\n", response);
return response;
}
void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
{
smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetMinDeepSleepDcefclk, freq_mhz, NULL);
}
void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays)
{
smu_print("SMU Set num of displays: num_displays = %d\n", num_displays);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, bool enable, uint8_t cache_timer_delay, uint8_t cache_timer_scale)
{
/* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */
uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0);
smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n",
enable, cache_timer_delay, cache_timer_scale);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL);
}
void dcn30_smu_set_external_client_df_cstate_allow(struct clk_mgr_internal *clk_mgr, bool enable)
{
smu_print("SMU Set external client df cstate allow: enable = %d\n", enable);
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetExternalClientDfCstateAllow, enable ? 1 : 0, NULL);
}
void dcn30_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Set PME workaround\n");
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_BacoAudioD3PME, 0, NULL);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dcn201_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dm_helpers.h"
#include "dm_services.h"
#include "cyan_skillfish_ip_offset.h"
#include "dcn/dcn_2_0_3_offset.h"
#include "dcn/dcn_2_0_3_sh_mask.h"
#include "clk/clk_11_0_1_offset.h"
#include "clk/clk_11_0_1_sh_mask.h"
#define REG(reg) \
(clk_mgr->regs->reg)
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define CLK_BASE_INNER(seg) \
CLK_BASE__INST0_SEG ## seg
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
#define CTX \
clk_mgr->base.ctx
#define DC_LOGGER \
clk_mgr->base.ctx->logger
static const struct clk_mgr_registers clk_mgr_regs = {
CLK_COMMON_REG_LIST_DCN_201()
};
static const struct clk_mgr_shift clk_mgr_shift = {
CLK_COMMON_MASK_SH_LIST_DCN201_BASE(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask = {
CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
};
static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.max_supported_dppclk_khz = 1200000;
clk_mgr->clks.max_supported_dispclk_khz = 1200000;
}
static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
dc->debug.force_clock_mode & 0x1) {
force_reset = true;
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
clk_mgr_helper_get_active_display_cnt(dc, context);
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz))
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz))
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
}
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz))
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
update_dispclk = true;
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
}
static struct clk_mgr_funcs dcn201_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn201_update_clocks,
.init_clocks = dcn201_init_clocks,
.get_clock = dcn2_get_clock,
};
void dcn201_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn201_funcs;
clk_mgr->regs = &clk_mgr_regs;
clk_mgr->clk_mgr_shift = &clk_mgr_shift;
clk_mgr->clk_mgr_mask = &clk_mgr_mask;
clk_mgr->dccg = dccg;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
clk_mgr->base.dprefclk_khz *= 100;
if (clk_mgr->base.dprefclk_khz == 0)
clk_mgr->base.dprefclk_khz = 600000;
REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
clk_mgr->base.dentist_vco_freq_khz *= 100000;
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3000000;
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr->dfs_bypass_enabled = true;
dce_clock_read_ss_info(clk_mgr);
}
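/*
* Worked example (illustrative, not in the original source): the register
* reads above are scaled into kHz, e.g. a CLK4_CLK2_CURRENT_CNT value of 6000
* gives 6000 * 100 = 600000 kHz (the 600 MHz fallback), and a FbMult_int of 30
* gives 30 * 100000 = 3000000 kHz (the 3 GHz fallback).
*/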
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn316_smu.h"
#include "mp/mp_13_0_8_offset.h"
#include "mp/mp_13_0_8_sh_mask.h"
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
#define VBIOSSMC_MSG_SetDispclkFreq 0x04 ///< Set display clock frequency in MHZ
#define VBIOSSMC_MSG_Spare1 0x05 ///< Spare1
#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequency in MHZ
#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected
#define VBIOSSMC_MSG_SPARE 0x0C ///< SPARE
#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0x0E ///< Set DRAM address high 32 bits for WM table transfer
#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0x0F ///< Set DRAM address low 32 bits for WM table transfer
#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10 ///< Transfer table from PMFW SRAM to system DRAM
#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11 ///< Transfer table from system DRAM to PMFW
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12 ///< Set Idle state optimization for display off
#define VBIOSSMC_MSG_GetDprefclkFreq 0x13 ///< Get DPREF clock frequency. Return in MHZ
#define VBIOSSMC_MSG_GetDtbclkFreq 0x14 ///< Get DTB clock frequency. Return in MHZ
#define VBIOSSMC_MSG_SetDtbclkFreq 0x15 ///< Inform PMFW to turn on/off DTB clock arg = 1, turn DTB clock on 600MHz/ arg = 0 turn DTB clock off
#define VBIOSSMC_Message_Count 0x16 ///< Total number of VBIOS and DAL messages
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x01 ///< Message Response OK
#define VBIOSSMC_Result_Failed 0xFF ///< Message Response Failed
#define VBIOSSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn316_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_91);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
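/*
* Descriptive note (added for clarity, not in the original source): callers in
* this file poll with delay_us = 10 and max_retries = 200000, i.e. a worst-case
* wait budget of roughly 10 us * 200000 = 2 seconds, the same 10 * 200000
* figure reported to dm_helpers_smu_timeout() on timeout.
*/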
static int dcn316_smu_send_msg_with_param(
struct clk_mgr_internal *clk_mgr,
unsigned int msg_id, unsigned int param)
{
uint32_t result;
result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
if (result == VBIOSSMC_Status_BUSY) {
return -1;
}
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Status_BUSY) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
}
return REG_READ(MP1_SMN_C2PMSG_83);
}
int dcn316_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
return dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetPmfwVersion,
0);
}
int dcn316_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dispclk_khz;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_dcfclk_khz;
actual_dcfclk_set_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}
int dcn316_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
int actual_min_ds_dcfclk_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_min_ds_dcfclk_khz;
actual_min_ds_dcfclk_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
return actual_min_ds_dcfclk_mhz * 1000;
}
int dcn316_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dpp_khz;
actual_dppclk_set_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
void dcn316_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return;
if (!clk_mgr->smu_present)
return;
//TODO: Work with smu team to define optimization options.
dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
}
void dcn316_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
union display_idle_optimization_u idle_info = { 0 };
if (!clk_mgr->smu_present)
return;
if (enable) {
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
}
dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
}
void dcn316_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}
void dcn316_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}
void dcn316_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}
void dcn316_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}
void dcn316_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
}
/* Arg = 1: turn DTB clock on; 0: turn DTB clock off. When on, it runs at 600 MHz */
void dcn316_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
if (!clk_mgr->smu_present)
return;
dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDtbclkFreq,
enable);
}
int dcn316_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
{
int dprefclk_get_mhz = -1;
if (clk_mgr->smu_present) {
dprefclk_get_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetDprefclkFreq,
0);
}
return (dprefclk_get_mhz * 1000);
}
int dcn316_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetFclkFrequency,
0);
}
return (fclk_get_mhz * 1000);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"
// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dcn316_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "link.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
#define TO_CLK_MGR_DCN316(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn316, base)
static int dcn316_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
/* WA for hang on HDMI after the display is turned off and back on */
if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
}
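/*
 * WA used around DISPCLK changes (see dcn316_update_clocks): pipes whose
 * stream is blanked, has no plane, or carries a virtual signal get their OTG
 * immediately disabled before the new DISPCLK is programmed and re-enabled
 * afterwards, with the sync context for the pipe reset on disable.
 */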
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
}
static void dcn316_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
dcn316_smu_enable_pme_wa(clk_mgr);
}
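/*
 * Main clock update entry point for DCN316. Rough flow: update DTBCLK and the
 * display power/idle state depending on safe_to_lower, program hard-min DCFCLK
 * and deep-sleep DCFCLK through the SMU, then DPPCLK/DISPCLK (ordering the
 * per-DPP DTO update against the global DPPCLK change depending on whether the
 * clock is being raised or lowered), and finally notify DMCUB of the new clocks.
 */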
static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
union dmub_rb_cmd cmd;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
if (dc->work_arounds.skip_clock_update)
return;
/*
* If it is safe to lower but we are already in the lower state, we don't have to do anything;
* if safe_to_lower is false, we just go to the higher state.
*/
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
if (safe_to_lower) {
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
dcn316_smu_set_dtbclk(clk_mgr, false);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn316_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
idle_info.idle_info.s0i2_rdy = 1;
dcn316_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
} else {
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
dcn316_smu_set_dtbclk(clk_mgr, true);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
dcn316_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
}
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn316_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn316_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
// workaround: limit dppclk to a minimum of 100 MHz to avoid underflow when a low-resolution eDP panel is switched to an eDP-plus-4K-monitor configuration.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
if (new_clocks->dispclk_khz < 100000)
new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn316_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn316_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
// notify DMCUB of latest clocks
memset(&cmd, 0, sizeof(cmd));
cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
clk_mgr_base->clks.dcfclk_deep_sleep_khz;
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
return;
}
static struct clk_bw_params dcn316_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
.num_entries = 5,
},
};
static struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 6.09,
.sr_enter_plus_exit_time_us = 7.14,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
}
};
static struct wm_table lpddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
};
static DpmClocks_316_t dummy_clocks;
static struct dcn316_watermarks dummy_wms = { 0 };
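/*
 * Translate the driver watermark table into the SMU watermark-range table.
 * Only valid entries are copied; p-state-change rows get non-overlapping
 * min/max MCLK ranges derived from the clock table, and the first/last rows
 * are widened so the whole 0..0xFFFF range is covered. The SOCCLK row is for
 * writeback only and is left fully unconstrained.
 */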
static void dcn316_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn316_watermarks *table)
{
int i, num_valid_sets;
num_valid_sets = 0;
for (i = 0; i < WM_SET_COUNT; i++) {
/* skip empty entries, the smu array has no holes*/
if (!bw_params->wm_table.entries[i].valid)
continue;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on fclk, so leave it as unconstrained */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
if (i == 0)
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
/* Modify previous watermark range to cover up to max */
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
}
num_valid_sets++;
}
ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
/* modify the min and max to make sure we cover the whole range*/
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
/* This is for writeback only; it does not matter currently as there is no writeback support */
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}
static void dcn316_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn316 *clk_mgr_dcn316 = TO_CLK_MGR_DCN316(clk_mgr);
struct dcn316_watermarks *table = clk_mgr_dcn316->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
if (!table || clk_mgr_dcn316->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
dcn316_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn316_smu_set_dram_addr_high(clk_mgr,
clk_mgr_dcn316->smu_wm_set.mc_address.high_part);
dcn316_smu_set_dram_addr_low(clk_mgr,
clk_mgr_dcn316->smu_wm_set.mc_address.low_part);
dcn316_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
static void dcn316_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn316_smu_dpm_clks *smu_dpm_clks)
{
DpmClocks_316_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
dcn316_smu_set_dram_addr_high(clk_mgr,
smu_dpm_clks->mc_address.high_part);
dcn316_smu_set_dram_addr_low(clk_mgr,
smu_dpm_clks->mc_address.low_part);
dcn316_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
int i;
for (i = 0; i < num_clocks; ++i) {
if (clocks[i] > max)
max = clocks[i];
}
return max;
}
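/*
 * Return the clock associated with the requested SOC voltage: an exact
 * voltage match wins, otherwise the clock belonging to the highest voltage
 * below the requested one is used.
 */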
static unsigned int find_clk_for_voltage(
const DpmClocks_316_t *clock_table,
const uint32_t clocks[],
unsigned int voltage)
{
int i;
int max_voltage = 0;
int clock = 0;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
} else if (clock_table->SocVoltage[i] >= max_voltage &&
clock_table->SocVoltage[i] < voltage) {
max_voltage = clock_table->SocVoltage[i];
clock = clocks[i];
}
}
ASSERT(clock);
return clock;
}
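/*
 * Build bw_params from the DPM clock table reported by the SMU: DF p-state
 * entries are stored in reverse order, so they are copied back-to-front;
 * DISPCLK/DPPCLK are pinned to their maximum DPM level for every entry,
 * DCFCLK/SOCCLK are picked per voltage, and the watermark table entries are
 * marked valid for each populated clock-table entry.
 */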
static void dcn316_clk_mgr_helper_populate_bw_params(
struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
const DpmClocks_316_t *clock_table)
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
if (clock_table->DfPstateTable[i].FClk != 0) {
j = i;
break;
}
}
if (j == -1) {
/* clock table is all 0s, just use our own hardcode */
ASSERT(0);
return;
}
bw_params->clk_table.num_entries = j + 1;
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
ASSERT(0);
}
for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
int temp;
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
switch (clock_table->DfPstateTable[j].WckRatio) {
case WCK_RATIO_1_2:
bw_params->clk_table.entries[i].wck_ratio = 2;
break;
case WCK_RATIO_1_4:
bw_params->clk_table.entries[i].wck_ratio = 4;
break;
default:
bw_params->clk_table.entries[i].wck_ratio = 1;
}
temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
if (temp)
bw_params->clk_table.entries[i].dcfclk_mhz = temp;
temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
if (temp)
bw_params->clk_table.entries[i].socclk_mhz = temp;
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
bw_params->wm_table.entries[i].valid = true;
}
}
static struct clk_mgr_funcs dcn316_funcs = {
.enable_pme_wa = dcn316_enable_pme_wa,
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn316_update_clocks,
.init_clocks = dcn31_init_clocks,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn316_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
void dcn316_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn316 *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 };
struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn316_funcs;
clk_mgr->base.pp_smu = pp_smu;
clk_mgr->base.dccg = dccg;
clk_mgr->base.dfs_bypass_disp_clk = 0;
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct dcn316_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct dcn316_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
smu_dpm_clks.dpm_clks = (DpmClocks_316_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(DpmClocks_316_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
ASSERT(smu_dpm_clks.dpm_clks);
clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver > 0)
clk_mgr->base.smu_present = true;
// Skip this for now as it did not work on DCN315; re-enable during bring-up
//clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2500 MHz */
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn316_bw_params.wm_table = lpddr5_wm_table;
} else {
dcn316_bw_params.wm_table = ddr4_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
/*clk_mgr->base.dccg->ref_dtbclk_khz =
dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
clk_mgr->base.base.bw_params = &dcn316_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn316_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn316_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
smu_dpm_clks.dpm_clks);
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
smu_dpm_clks.dpm_clks);
}
void dcn316_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
struct clk_mgr_dcn316 *clk_mgr = TO_CLK_MGR_DCN316(clk_mgr_int);
if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
clk_mgr->smu_wm_set.wm_set);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c |
/*
* Copyright 2020 Mauro Rossi <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce60_clk_mgr.h"
#include "reg_helper.h"
#include "dmcu.h"
#include "core_types.h"
#include "dal_asic_id.h"
/*
* Currently the register shifts and masks in this file are used for dce60
* which has no DPREFCLK_CNTL register
* TODO: remove this when DENTIST_DISPCLK_CNTL
* is moved to dccg, where it belongs
*/
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#define REG(reg) \
(clk_mgr->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
/* set register offset */
#define SR(reg_name)\
.reg_name = mm ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE60_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(_MASK)
};
/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 6.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dp_ref_clk_khz;
int target_div;
/* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
* programmed DID DENTIST_DPREFCLK_WDIVIDER */
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
/* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
target_div = dentist_get_divider_from_did(dprefclk_wdivider);
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
static void dce60_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
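/*
 * DCE6 clock update: apply the 15% DISPCLK margin W/A when DFS bypass is not
 * active, request the required power level from PPLIB, program DISPCLK if it
 * changed, and then push the display configuration to PPLIB.
 */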
static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce60_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce60_funcs = {
.get_dp_ref_clk_frequency = dce60_get_dp_ref_freq_khz,
.update_clocks = dce60_update_clocks
};
void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
dce60_max_clks_by_state,
sizeof(dce60_max_clks_by_state));
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "reg_helper.h"
#include "dmcu.h"
#include "core_types.h"
#include "dal_asic_id.h"
/*
* Currently the register shifts and masks in this file are used for dce100 and dce80
* which have identical definitions.
* TODO: remove this when DPREFCLK_CNTL and dpref DENTIST_DISPCLK_CNTL
* is moved to dccg, where it belongs
*/
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#define REG(reg) \
(clk_mgr->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
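/*
 * Convert a DENTIST divider ID (DID) to the actual divider value. The DID
 * space is split into ranges with different step sizes; out-of-range DIDs
 * are clamped to the supported minimum/maximum before conversion.
 */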
int dentist_get_divider_from_did(int did)
{
if (did < DENTIST_BASE_DID_1)
did = DENTIST_BASE_DID_1;
if (did > DENTIST_MAX_DID)
did = DENTIST_MAX_DID;
if (did < DENTIST_BASE_DID_2) {
return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
* (did - DENTIST_BASE_DID_1);
} else if (did < DENTIST_BASE_DID_3) {
return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
* (did - DENTIST_BASE_DID_2);
} else if (did < DENTIST_BASE_DID_4) {
return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
* (did - DENTIST_BASE_DID_3);
} else {
return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
* (did - DENTIST_BASE_DID_4);
}
}
/* SW will adjust the DP REF Clock average value for all purposes
 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases:
 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled with SW
 *   calculations for DS_INCR/DS_MODULO (this is planned to be the default case)
 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled with HW
 *   calculations (not planned to be used, but the average clock should still
 *   be valid)
 * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
 *   (should not be the case with CIK), then SW should program all rates
 *   generated according to the average value (as with previous ASICs)
 */
int dce_adjust_dp_ref_freq_for_ss(struct clk_mgr_internal *clk_mgr_dce, int dp_ref_clk_khz)
{
if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
struct fixed31_32 ss_percentage = dc_fixpt_div_int(
dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
clk_mgr_dce->dprefclk_ss_divider), 200);
struct fixed31_32 adj_dp_ref_clk_khz;
ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
return dp_ref_clk_khz;
}
int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz;
int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
ASSERT(dprefclk_src_sel == 0);
/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
* programmed DID DENTIST_DPREFCLK_WDIVIDER */
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
/* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
target_div = dentist_get_divider_from_did(dprefclk_wdivider);
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return dce_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_base->dprefclk_khz);
}
/* Unit: kHz. Before mode set, get the pixel clock from the context; the ASIC
 * register may not be programmed yet
*/
uint32_t dce_get_max_pixel_clock_for_all_paths(struct dc_state *context)
{
uint32_t max_pix_clk = 0;
int i;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == NULL)
continue;
/* do not check under lay */
if (pipe_ctx->top_pipe)
continue;
if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
/* Raise the clock state for HBR2/HBR3 if required. Confirmed with HW: the
 * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail
*/
if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
}
return max_pix_clk;
}
enum dm_pp_clocks_state dce_get_required_clocks_state(
struct clk_mgr *clk_mgr_base,
struct dc_state *context)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int i;
enum dm_pp_clocks_state low_req_clk;
int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
/* Iterate from highest supported to lowest valid state, and update
* lowest RequiredState with the lowest state that satisfies
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
break;
low_req_clk = i + 1;
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
< context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
}
return low_req_clk;
}
/* TODO: remove; use the two broken-down functions instead */
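/*
 * Program DISPCLK through the VBIOS display-engine PLL command table.
 * The request is clamped to a minimum of VCO/64, the DFS-bypass display
 * clock is cached when bypass is active, and the DMCU PSR wait loop is
 * updated for the new clock. Callers pass kHz and should store the
 * returned actual clock.
 */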
int dce_set_clock(
struct clk_mgr *clk_mgr_base,
int requested_clk_khz)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
int actual_clock = requested_clk_khz;
struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
clk_mgr_dce->base.dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
if (clk_mgr_dce->dfs_bypass_active) {
/* Cache the fixed display clock*/
clk_mgr_dce->dfs_bypass_disp_clk =
pxl_clk_params.dfs_bypass_display_clock;
actual_clock = pxl_clk_params.dfs_bypass_display_clock;
}
/* Coming from power down / HW reset, we need to mark the clock state as
 * ClocksStateNominal so that on resume we will call the pplib voltage regulator. */
if (requested_clk_khz == 0)
clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
return actual_clock;
}
static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
{
struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
int i;
if (bp->integrated_info)
clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
switch (i) {
case 0:
clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
break;
case 1:
clk_state = DM_PP_CLOCKS_STATE_LOW;
break;
case 2:
clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
break;
case 3:
clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
break;
default:
clk_state = DM_PP_CLOCKS_STATE_INVALID;
break;
}
/* Do not allow a bad VBIOS/SBIOS to override with invalid values;
 * check for >= 100 MHz */
if (bp->integrated_info)
if (bp->integrated_info->disp_clk_voltage[i].max_supported_clk >= 100000)
clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
bp->integrated_info->disp_clk_voltage[i].max_supported_clk;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr_dce->dfs_bypass_enabled = true;
}
void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
{
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
bp, AS_SIGNAL_TYPE_GPU_PLL);
if (ss_info_num) {
struct spread_spectrum_info info = { { 0 } };
enum bp_result result = bp->funcs->get_spread_spectrum_info(
bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
/* Per VBIOS behavior, an entry for GPU PLL SS is kept even if SS is not
 * enabled; in that case a non-zero SSInfo.spreadSpectrumPercentage is the
 * sign that SS is actually enabled
 */
if (result == BP_RESULT_OK &&
info.spread_spectrum_percentage != 0) {
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/* TODO: Currently for DP Reference clock we
* need only SS percentage for
* downspread */
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
return;
}
result = bp->funcs->get_spread_spectrum_info(
bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
/* Per VBIOS behavior, an entry for DPREFCLK SS is kept even if SS is not
 * enabled; in that case a non-zero SSInfo.spreadSpectrumPercentage is the
 * sign that SS is actually enabled
 */
if (result == BP_RESULT_OK &&
info.spread_spectrum_percentage != 0) {
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/* Currently for DP Reference clock we
* need only SS percentage for
* downspread */
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
}
static void dce_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce_update_clocks
};
void dce_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
struct clk_mgr *base = &clk_mgr->base;
struct dm_pp_static_clock_info static_clk_info = {0};
memcpy(clk_mgr->max_clks_by_state,
dce80_max_clks_by_state,
sizeof(dce80_max_clks_by_state));
base->ctx = ctx;
base->funcs = &dce_funcs;
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
clk_mgr->max_clks_state = static_clk_info.max_clocks_state;
else
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "dce110_clk_mgr.h"
#include "../clk_mgr/dce100/dce_clk_mgr.h"
/* set register offset */
#define SR(reg_name)\
.reg_name = mm ## reg_name
/* set register offset with instance */
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
static int determine_sclk_from_bounding_box(
const struct dc *dc,
int required_sclk)
{
int i;
/*
* Some asics do not give us sclk levels, so we just report the actual
* required sclk
*/
if (dc->sclk_lvls.num_levels == 0)
return required_sclk;
for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
return dc->sclk_lvls.clocks_in_khz[i];
}
/*
* even the maximum level could not satisfy the requirement; this
* is unexpected at this stage and should have been caught at
* validation time
*/
ASSERT(0);
return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}
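/*
 * Return the smallest vertical-blank time (in microseconds) across all
 * streams in the context, using v_total_min when a DRR adjustment is active.
 */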
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
{
uint8_t j;
uint32_t min_vertical_blank_time = -1;
for (j = 0; j < context->stream_count; j++) {
struct dc_stream_state *stream = context->streams[j];
uint32_t vertical_blank_in_pixels = 0;
uint32_t vertical_blank_time = 0;
uint32_t vertical_total_min = stream->timing.v_total;
struct dc_crtc_timing_adjust adjust = stream->adjust;
if (adjust.v_total_max != adjust.v_total_min)
vertical_total_min = adjust.v_total_min;
vertical_blank_in_pixels = stream->timing.h_total *
(vertical_total_min
- stream->timing.v_addressable);
vertical_blank_time = vertical_blank_in_pixels
* 10000 / stream->timing.pix_clk_100hz;
if (min_vertical_blank_time > vertical_blank_time)
min_vertical_blank_time = vertical_blank_time;
}
return min_vertical_blank_time;
}
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
int j;
int num_cfgs = 0;
for (j = 0; j < context->stream_count; j++) {
int k;
const struct dc_stream_state *stream = context->streams[j];
struct dm_pp_single_disp_config *cfg =
&pp_display_cfg->disp_configs[num_cfgs];
const struct pipe_ctx *pipe_ctx = NULL;
for (k = 0; k < MAX_PIPES; k++)
if (stream == context->res_ctx.pipe_ctx[k].stream) {
pipe_ctx = &context->res_ctx.pipe_ctx[k];
break;
}
ASSERT(pipe_ctx != NULL);
/* only notify active stream */
if (stream->dpms_off)
continue;
num_cfgs++;
cfg->signal = pipe_ctx->stream->signal;
cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
cfg->src_height = stream->src.height;
cfg->src_width = stream->src.width;
cfg->ddi_channel_mapping =
stream->link->ddi_channel_mapping.raw;
cfg->transmitter =
stream->link->link_enc->transmitter;
cfg->link_settings.lane_count =
stream->link->cur_link_settings.lane_count;
cfg->link_settings.link_rate =
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
cfg->sym_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
}
pp_display_cfg->display_count = num_cfgs;
}
void dce11_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
memory_type_multiplier = MEMORY_TYPE_HBM;
pp_display_cfg->all_displays_in_sync =
context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
context->bw_ctx.bw.dce.blackout_recovery_time_us;
/*
* TODO: determine whether the bandwidth has reached memory's limitation
* , then change minimum memory clock based on real-time bandwidth
* limitation.
*/
if ((dc->ctx->asic_id.chip_family == FAMILY_AI) &&
ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
(uint32_t) div64_s64(
div64_s64(dc->bw_vbios->high_yclk.value,
memory_type_multiplier), 10000));
} else {
pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ memory_type_multiplier;
}
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
context->bw_ctx.bw.dce.sclk_khz);
/*
* As a workaround for light-up with more than four 4K displays, set dcfclk to
* the min_engine_clock value. This is not required for fewer than five
* displays, so don't request dcfclk in dc in that case, to avoid impacting
* power saving.
*/
pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
/* TODO: dce11.2*/
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
/* TODO: is this still applicable?*/
if (pp_display_cfg->display_count == 1) {
const struct dc_crtc_timing *timing =
&context->streams[0]->timing;
pp_display_cfg->crtc_index =
pp_display_cfg->disp_configs[0].pipe_idx;
pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
}
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
static void dce11_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce110_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce11_update_clocks
};
void dce110_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce110_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce120_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dce120/dce120_hw_sequencer.h"
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
/**
* dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
* @clk_mgr_dce: clock manager internal structure
*
* Reads from VBIOS the XGMI spread spectrum info and saves it within
* the dce clock manager. This operation will overwrite the existing dprefclk
* SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also
* sets the ->xgmi_enabled flag.
*/
static void dce121_clock_patch_xgmi_ss_info(struct clk_mgr_internal *clk_mgr_dce)
{
enum bp_result result;
struct spread_spectrum_info info = { { 0 } };
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
clk_mgr_dce->xgmi_enabled = false;
result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
0, &info);
if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
clk_mgr_dce->xgmi_enabled = true;
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider =
info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/*
* Currently for DP Reference clock we
* need only SS percentage for
* downspread
*/
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
}
}
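/*
 * DCE12 clock update: apply the 15% DISPCLK margin W/A when DFS bypass is not
 * active, adjust DISPCLK for xGMI/WAFL spread spectrum when enabled, and issue
 * PPLIB voltage requests for the display clock and the maximum PHY clock
 * before applying the display requirements.
 */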
static void dce12_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
/*
* When xGMI is enabled, the display clk needs to be adjusted
* with the WAFL link's SS percentage.
*/
if (clk_mgr_dce->xgmi_enabled)
patched_disp_clk = dce_adjust_dp_ref_freq_for_ss(
clk_mgr_dce, patched_disp_clk);
clock_voltage_req.clocks_in_khz = patched_disp_clk;
clk_mgr_base->clks.dispclk_khz = dce112_set_clock(clk_mgr_base, patched_disp_clk);
dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
}
if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr_base->clks.phyclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
clock_voltage_req.clocks_in_khz = max_pix_clk;
clk_mgr_base->clks.phyclk_khz = max_pix_clk;
dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce120_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dce12_update_clocks
};
void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
clk_mgr->base.dprefclk_khz = 600000;
clk_mgr->base.funcs = &dce120_funcs;
}
void dce121_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
dce120_clk_mgr_construct(ctx, clk_mgr);
clk_mgr->base.dprefclk_khz = 625000;
/*
* The xGMI enabled info is used to determine if audio and display
* clocks need to be adjusted with the WAFL link's SS info.
*/
if (dce121_xgmi_enabled(ctx->dc->hwseq))
dce121_clock_patch_xgmi_ss_info(clk_mgr);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include <linux/delay.h>
#include "dcn301_smu.h"
#include "vangogh_ip_offset.h"
#include "mp/mp_11_5_0_offset.h"
#include "mp/mp_11_5_0_sh_mask.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
#define VBIOSSMC_MSG_GetSmuVersion 0x2
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
#define VBIOSSMC_MSG_SetDppclkFreq 0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x8
//#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0xA
#define VBIOSSMC_MSG_GetFclkFrequency 0xA
//#define VBIOSSMC_MSG_SetDisplayCount 0xC
//#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xD
#define VBIOSSMC_MSG_UpdatePmeRestore 0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0xE //Used for WM table txfr
#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
#define VBIOSSMC_Result_Failed 0xFF
#define VBIOSSMC_Result_UnknownCmd 0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_91);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
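/*
 * Message handshake with the VBIOS/SMC mailbox, as used below:
 *   MP1_SMN_C2PMSG_91 - response/status register (stays BUSY until the SMU replies)
 *   MP1_SMN_C2PMSG_83 - argument register (message parameter, also carries the returned value)
 *   MP1_SMN_C2PMSG_67 - message register (writing the message ID triggers the transaction)
 * A call therefore waits for any previous transaction to finish, clears the
 * response register, writes the argument, writes the message ID, and then
 * polls the response register again for the result.
 */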
static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
unsigned int msg_id,
unsigned int param)
{
uint32_t result;
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
if (result == VBIOSSMC_Status_BUSY) {
return -1;
}
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message; unit is MHz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK);
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
}
int dcn301_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
int smu_version = dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_GetSmuVersion,
0);
DC_LOG_DEBUG("%s %x\n", __func__, smu_version);
return smu_version;
}
int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dispclk_khz);
/* Unit of the SMU msg parameter is MHz */
actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
DC_LOG_DEBUG("%s %d\n", __func__, clk_mgr->base.dprefclk_khz / 1000);
actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
/* TODO: add code for programming the DP DTO; currently this is done by the command table */
return actual_dprefclk_set_mhz * 1000;
}
int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dcfclk_khz);
actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}
int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
int actual_min_ds_dcfclk_mhz = -1;
DC_LOG_DEBUG("%s(%d)\n", __func__, requested_min_ds_dcfclk_khz);
actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
return actual_min_ds_dcfclk_mhz * 1000;
}
int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dpp_khz);
actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
void dcn301_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
//TODO: Work with smu team to define optimization options.
DC_LOG_DEBUG("%s(%x)\n", __func__, idle_info);
dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
}
void dcn301_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
union display_idle_optimization_u idle_info = { 0 };
if (enable) {
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
}
DC_LOG_DEBUG("%s(%d)\n", __func__, enable);
dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
}
void dcn301_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
}
void dcn301_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
DC_LOG_DEBUG("%s(%x)\n", __func__, addr_high);
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}
void dcn301_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
DC_LOG_DEBUG("%s(%x)\n", __func__, addr_low);
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}
void dcn301_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}
void dcn301_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"
// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"
// For DML FPU code
#include "dml/dcn20/dcn20_fpu.h"
#include "vg_clk_mgr.h"
#include "dcn301_smu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "vangogh_ip_offset.h"
#include "clk/clk_11_5_0_offset.h"
#include "clk/clk_11_5_0_sh_mask.h"
/* Constants */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
/* Macros */
#define TO_CLK_MGR_VGH(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_vgh, base)
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
static int vg_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
/* WA for hang on HDMI after the display is turned off and then back on */
if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
}
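/*
 * Apply the clock state computed for @context. update_clocks is typically
 * called twice around mode programming: first with safe_to_lower == false,
 * where only clock increases are honored, and again with safe_to_lower == true
 * once the new configuration has taken effect and clocks may also be lowered.
 */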
static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
if (dc->work_arounds.skip_clock_update)
return;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
*/
if (safe_to_lower) {
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = vg_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
dcn301_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
} else {
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
dcn301_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
}
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && !dc->debug.disable_min_fclk) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn301_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && !dc->debug.disable_min_fclk) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn301_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
// workaround: limit DPPCLK to 100 MHz to avoid underflow when switching from a low-resolution eDP panel to an additional 4K monitor.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn301_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
update_dispclk = true;
}
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
/* get FbMult value */
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
/*
 * The register value of fbmult is in 8.16 format; convert it to 31.32
 * to leverage the fixed-point operations available in the driver.
 */
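/*
 * Illustrative example (arbitrary register values): FbMult_int = 75 and
 * FbMult_frac = 0x8000 (i.e. 0.5) give FbMult = 75.5; with the 48000 kHz DFS
 * reference programmed in vg_clk_mgr_construct this yields a VCO of
 * 75.5 * 48000 = 3624000 kHz.
 */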
REG_GET(CLK1_0_CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
REG_GET(CLK1_0_CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
pll_req = dc_fixpt_from_int(fbmult_int_val);
/*
 * Since the fractional part is only 16 bits in the register definition but 32 bits
 * in our fixed-point definition, shift left by 16 to obtain the correct value.
 */
pll_req.value |= fbmult_frac_val << 16;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
/* integer part is now VCO frequency in kHz */
return dc_fixpt_floor(pll_req);
}
static void vg_dump_clk_registers_internal(struct dcn301_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK3_CURRENT_CNT);
internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK3_BYPASS_CNTL);
internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_0_CLK1_CLK3_DS_CNTL); //dcf deep sleep divider
internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_0_CLK1_CLK3_ALLOW_DS);
internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK1_CURRENT_CNT);
internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK1_BYPASS_CNTL);
internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK2_CURRENT_CNT);
internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK2_BYPASS_CNTL);
internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK0_CURRENT_CNT);
internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK0_BYPASS_CNTL);
}
/* This function collect raw clk register values */
static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
struct dcn301_clk_internal internal = {0};
char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
unsigned int chars_printed = 0;
unsigned int remaining_buffer = log_info->bufSize;
vg_dump_clk_registers_internal(&internal, clk_mgr_base);
regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (log_info->enabled) {
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
regs_and_bypass->dcfclk,
regs_and_bypass->dcf_deep_sleep_divider,
regs_and_bypass->dcf_deep_sleep_allow,
bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
regs_and_bypass->dprefclk,
bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
regs_and_bypass->dispclk,
bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
//split
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
// REGISTER VALUES
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
internal.CLK1_CLK3_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
internal.CLK1_CLK3_DS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
internal.CLK1_CLK3_ALLOW_DS);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
internal.CLK1_CLK2_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
internal.CLK1_CLK0_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
internal.CLK1_CLK1_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
internal.CLK1_CLK3_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
internal.CLK1_CLK2_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
internal.CLK1_CLK0_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n",
internal.CLK1_CLK1_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
}
}
static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
dcn301_smu_enable_pme_wa(clk_mgr);
}
static void vg_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
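/*
 * Translate the driver's wm_table into the SMU watermark rows. Only the
 * DCFCLK-based rows are populated: each p-state row is bounded by the DCFCLK of
 * its clock table entry, and the first/last rows are widened afterwards so the
 * full 0..0xFFFF range is covered without gaps. A single SOCCLK row is also
 * filled in as a placeholder, since writeback is not supported.
 */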
static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct watermarks *table)
{
int i, num_valid_sets;
num_valid_sets = 0;
for (i = 0; i < WM_SET_COUNT; i++) {
/* skip empty entries; the SMU array has no holes */
if (!bw_params->wm_table.entries[i].valid)
continue;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on fclk, so leave it as unconstrained */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
if (i == 0)
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
/* Modify previous watermark range to cover up to max */
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
}
num_valid_sets++;
}
ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
/* modify the min and max to make sure we cover the whole range*/
table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
/* This is for writeback only; it does not matter currently since writeback is not supported */
table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}
static void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_vgh *clk_mgr_vgh = TO_CLK_MGR_VGH(clk_mgr);
struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
vg_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn301_smu_set_dram_addr_high(clk_mgr,
clk_mgr_vgh->smu_wm_set.mc_address.high_part);
dcn301_smu_set_dram_addr_low(clk_mgr,
clk_mgr_vgh->smu_wm_set.mc_address.low_part);
dcn301_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
static bool vg_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
return true;
}
static struct clk_mgr_funcs vg_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = vg_update_clocks,
.init_clocks = vg_init_clocks,
.enable_pme_wa = vg_enable_pme_wa,
.are_clock_states_equal = vg_are_clock_states_equal,
.notify_wm_ranges = vg_notify_wm_ranges
};
static struct clk_bw_params vg_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
.entries = {
{
.voltage = 0,
.dcfclk_mhz = 400,
.fclk_mhz = 400,
.memclk_mhz = 800,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 483,
.fclk_mhz = 800,
.memclk_mhz = 1600,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 602,
.fclk_mhz = 1067,
.memclk_mhz = 1067,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 738,
.fclk_mhz = 1333,
.memclk_mhz = 1600,
.socclk_mhz = 0,
},
},
.num_entries = 4,
},
};
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
int i;
for (i = 0; i < num_clocks; ++i) {
if (clocks[i] > max)
max = clocks[i];
}
return max;
}
static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
unsigned int voltage)
{
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
ASSERT(0);
return 0;
}
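/*
 * Build bw_params->clk_table from the SMU DPM table. DfPstateTable is filled by
 * the SMU from highest to lowest level, so it is walked in reverse to produce
 * an ascending clock table; the DCFCLK for each level is looked up by SoC
 * voltage, and the top entry uses the maximum reported DCFCLK.
 */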
static void vg_clk_mgr_helper_populate_bw_params(
struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
const struct vg_dpm_clocks *clock_table)
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
j = -1;
ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
/* Find the lowest DPM level; the FCLK table is filled in reverse order */
for (i = VG_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
if (clock_table->DfPstateTable[i].fclk != 0) {
j = i;
break;
}
}
if (j == -1) {
/* clock table is all zeros, just use our own hardcoded values */
ASSERT(0);
return;
}
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
bw_params->wm_table.entries[i].valid = true;
}
if (bw_params->vram_type == LpDdr4MemType) {
/*
* WM set D will be re-purposed for memory retraining
*/
DC_FP_START();
dcn21_clk_mgr_set_bw_params_wm_table(bw_params);
DC_FP_END();
}
}
/* Temporary placeholder until we can get the values from fuse */
static struct vg_dpm_clocks dummy_clocks = {
.DcfClocks = { 201, 403, 403, 403, 403, 403, 403 },
.SocClocks = { 400, 600, 600, 600, 600, 600, 600 },
.SocVoltage = { 2800, 2860, 2860, 2860, 2860, 2860, 2860, 2860 },
.DfPstateTable = {
{ .fclk = 400, .memclk = 400, .voltage = 2800 },
{ .fclk = 400, .memclk = 400, .voltage = 2800 },
{ .fclk = 400, .memclk = 400, .voltage = 2800 },
{ .fclk = 400, .memclk = 400, .voltage = 2800 }
}
};
static struct watermarks dummy_wms = { 0 };
static void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct smu_dpm_clks *smu_dpm_clks)
{
struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
if (!table || smu_dpm_clks->mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
dcn301_smu_set_dram_addr_high(clk_mgr,
smu_dpm_clks->mc_address.high_part);
dcn301_smu_set_dram_addr_low(clk_mgr,
smu_dpm_clks->mc_address.low_part);
dcn301_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
void vg_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_vgh *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct smu_dpm_clks smu_dpm_clks = { 0 };
struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &vg_funcs;
clk_mgr->base.pp_smu = pp_smu;
clk_mgr->base.dccg = dccg;
clk_mgr->base.dfs_bypass_disp_clk = 0;
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
smu_dpm_clks.dpm_clks = (struct vg_dpm_clocks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct vg_dpm_clocks),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
ASSERT(smu_dpm_clks.dpm_clks);
clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver)
clk_mgr->base.smu_present = true;
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
vg_bw_params.wm_table = lpddr5_wm_table;
} else {
vg_bw_params.wm_table = ddr4_wm_table;
}
/* Save the clocks configured at boot, for debug purposes */
vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
clk_mgr->base.base.bw_params = &vg_bw_params;
vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
vg_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
smu_dpm_clks.dpm_clks);
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
smu_dpm_clks.dpm_clks);
}
void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
struct clk_mgr_vgh *clk_mgr = TO_CLK_MGR_VGH(clk_mgr_int);
if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
clk_mgr->smu_wm_set.wm_set);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn32_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dalsmc.h"
#include "smu13_driver_if.h"
#define mmDAL_MSG_REG 0x1628A
#define mmDAL_ARG_REG 0x16273
#define mmDAL_RESP_REG 0x16274
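/* These three registers form the driver <-> PMFW mailbox used by this file:
 * DAL_MSG_REG carries the message ID, DAL_ARG_REG the 32-bit argument (and any
 * returned value), and DAL_RESP_REG the DALSMC_Result_* status.
 */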
#define REG(reg_name) \
mm ## reg_name
#include "logger_types.h"
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t reg = 0;
do {
reg = REG_READ(DAL_RESP_REG);
if (reg)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return reg;
}
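/*
 * Send one message and optionally read back the SMU's reply from the argument
 * register. Messages such as DALSMC_MSG_SetHardMinByFreq use this read-back to
 * report the frequency that was actually granted; callers that do not need a
 * reply pass param_out == NULL.
 */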
static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
{
/* Wait for response register to be ready */
dcn32_smu_wait_for_response(clk_mgr, 10, 200000);
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
/* Set the parameter register for the SMU message */
REG_WRITE(DAL_ARG_REG, param_in);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
/* Wait for response */
if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
return true;
}
return false;
}
void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable)
{
smu_print("FCLK P-state support value is : %d\n", enable);
dcn32_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetFclkSwitchAllow, enable ? FCLK_PSTATE_SUPPORTED : FCLK_PSTATE_NOTSUPPORTED, NULL);
}
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways)
{
uint32_t param = (num_ways << 1) | (num_ways > 0);
dcn32_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, param, NULL);
smu_print("Numways for SubVP : %d\n", num_ways);
}
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
dcn32_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL);
}
void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Set PME workaround\n");
dcn32_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_BacoAudioD3PME, 0, NULL);
}
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
/* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
uint32_t param = (clk << 16) | freq_mhz;
smu_print("SMU Set hard min by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz);
dcn32_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetHardMinByFreq, param, &response);
smu_print("SMU Frequency set = %d KHz\n", response);
return response;
}
void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
smu_print("PMFW to wait for DMCUB ack for MCLK : %d\n", enable);
dcn32_smu_send_msg_with_param(clk_mgr, 0x14, enable ? 1 : 0, NULL);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dcn32/dcn32_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "dcn32/dcn32_clk_mgr.h"
#include "dml/dcn32/dcn32_fpu.h"
#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK1_CLK_PLL_REQ 0x16E37
#define mmCLK1_CLK0_DFS_CNTL 0x16E69
#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
#define mmCLK1_CLK3_DFS_CNTL 0x16E72
#define mmCLK1_CLK4_DFS_CNTL 0x16E75
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xffff0000UL
#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x00000000
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0x0000000c
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x00000010
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
#define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E64
#define mmCLK01_CLK0_CLK1_DFS_CNTL 0x16E67
#define mmCLK01_CLK0_CLK2_DFS_CNTL 0x16E6A
#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E6D
#define mmCLK01_CLK0_CLK4_DFS_CNTL 0x16E70
#define CLK0_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffL
#define CLK0_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000L
#define CLK0_CLK_PLL_REQ__FbMult_frac_MASK 0xffff0000L
#define CLK0_CLK_PLL_REQ__FbMult_int__SHIFT 0x00000000
#define CLK0_CLK_PLL_REQ__PllSpineDiv__SHIFT 0x0000000c
#define CLK0_CLK_PLL_REQ__FbMult_frac__SHIFT 0x00000010
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
#define REG(reg) \
(clk_mgr->regs->reg)
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define CLK_SR_DCN32(reg_name)\
.reg_name = mm ## reg_name
static const struct clk_mgr_registers clk_mgr_regs_dcn32 = {
CLK_REG_LIST_DCN32()
};
static const struct clk_mgr_shift clk_mgr_shift_dcn32 = {
CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask_dcn32 = {
CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
#define CLK_SR_DCN321(reg_name, block, inst)\
.reg_name = mm ## block ## _ ## reg_name
static const struct clk_mgr_registers clk_mgr_regs_dcn321 = {
CLK_REG_LIST_DCN321()
};
static const struct clk_mgr_shift clk_mgr_shift_dcn321 = {
CLK_COMMON_MASK_SH_LIST_DCN321(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask_dcn321 = {
CLK_COMMON_MASK_SH_LIST_DCN321(_MASK)
};
/* Query SMU for all clock states for a particular clock */
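/* Passing index 0xFF asks PMFW for the number of DPM levels for the clock; bit 31
 * of the reply distinguishes fine-grained DPM (only min and max are reported)
 * from a discrete set of levels, which is then read back entry by entry.
 */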
static void dcn32_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0,
unsigned int *num_levels)
{
unsigned int i;
char *entry_i = (char *)entry_0;
uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
*num_levels = 2;
else
/* discrete, a number of fixed states */
/* will set num_levels to 0 on failure */
*num_levels = ret & 0xFF;
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels; i++) {
*((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
static void dcn32_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
DC_FP_START();
dcn32_build_wm_range_table_fpu(clk_mgr);
DC_FP_END();
}
void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int i;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
dcn30_smu_check_driver_if_version(clk_mgr);
dcn30_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) {
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz =
dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
}
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
num_levels = num_entries_per_clk->num_dispclk_levels;
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
//HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
for (i = 0; i < num_levels; i++)
if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz
< khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz))
clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz);
}
for (i = 0; i < num_levels; i++)
if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz > 1950)
clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = 1950;
if (clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz) {
for (i = 0; i < num_levels; i++)
if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz
< khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz))
clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
DC_FP_START();
/* WM range table */
dcn32_build_wm_range_table(clk_mgr);
DC_FP_END();
}
static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context,
int ref_dtbclk_khz)
{
struct dccg *dccg = clk_mgr->dccg;
uint32_t tg_mask = 0;
int i;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dtbclk_dto_params dto_params = {0};
/* use mask to program DTO once per tg */
if (pipe_ctx->stream_res.tg &&
!(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
//dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
}
}
}
/* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),
* update DPPCLK to be the exact frequency that will be set after the DPPCLK
* divider is updated. This will prevent rounding issues that could cause DPP
* refclk and DPP DTO to not match up.
*/
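/*
 * Illustrative example (assuming the 0.25-step DENTIST divider, i.e. a scale
 * factor of 4): with a 3600000 kHz VCO and a 700000 kHz DPPCLK request, the
 * integer divider is 4 * 3600000 / 700000 = 20, so the frequency actually sent
 * to PMFW is 4 * 3600000 / 20 = 720000 kHz rather than the raw request.
 */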
static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
int dpp_divider = 0;
int disp_divider = 0;
if (new_clocks->dppclk_khz) {
dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / new_clocks->dppclk_khz;
new_clocks->dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
}
if (new_clocks->dispclk_khz > 0) {
disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / new_clocks->dispclk_khz;
new_clocks->dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
}
}
void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
/* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
* In this case just continue in loop
*/
continue;
} else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
/* The software state is not valid if dpp resource is NULL and
* dppclk_khz > 0.
*/
ASSERT(false);
continue;
}
prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
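/*
 * Program the DENTIST DISPCLK divider for the current dispclk_khz. Divider
 * value 127 is a special deep-divide case: when leaving it, pixels are dropped
 * from each OTG FIFO down to a safe level first; when entering it, the divider
 * is first parked at 126 (by requesting the corresponding frequency from PMFW)
 * and pixels are added to each OTG FIFO, to avoid FIFO corruption while the
 * divider changes.
 */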
static void dcn32_update_clocks_update_dentist(
struct clk_mgr_internal *clk_mgr,
struct dc_state *context)
{
uint32_t new_disp_divider = 0;
uint32_t new_dispclk_wdivider = 0;
uint32_t old_dispclk_wdivider = 0;
uint32_t i;
uint32_t dentist_dispclk_wdivider_readback = 0;
struct dc *dc = clk_mgr->base.ctx->dc;
if (clk_mgr->base.clks.dispclk_khz == 0)
return;
new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
REG_GET(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, &old_dispclk_wdivider);
/* When changing divider to or from 127, some extra programming is required to prevent corruption */
if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
uint32_t fifo_level;
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
continue;
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
stream_enc);
N = fifo_level / 4;
dccg->funcs->set_fifo_errdet_ovr_en(
dccg,
true);
for (j = 0; j < N - 4; j++)
dccg->funcs->otg_drop_pixel(
dccg,
pipe_ctx->stream_res.tg->inst);
dccg->funcs->set_fifo_errdet_ovr_en(
dccg,
false);
}
} else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) {
/* request clock with 126 divider first */
uint32_t temp_disp_divider = dentist_get_divider_from_did(126);
uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
if (dentist_dispclk_wdivider_readback != 126) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
}
}
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
uint32_t fifo_level;
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
continue;
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
stream_enc);
N = fifo_level / 4;
dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
for (j = 0; j < 12 - N; j++)
dccg->funcs->otg_add_pixel(dccg,
pipe_ctx->stream_res.tg->inst);
dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
}
}
/* do requested DISPCLK updates */
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
if (dentist_dispclk_wdivider_readback > new_dispclk_wdivider) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, new_dispclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
}
}
}
static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dispclk_wdivider;
int disp_divider;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
/* Return DISPCLK freq in Khz */
if (disp_divider)
return (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
return 0;
}
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false, update_fclk = false;
bool p_state_change_support;
bool fclk_p_state_change_support;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
(dc->debug.force_clock_mode & 0x1)) {
/* This is from resume or boot up; if the forced_clock cfg option is used,
 * we bypass programming DISPCLK and DPPCLK, but still need to set them for S3.
 */
force_reset = true;
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
/* Force_clock_mode 0x1: force reset the clock even if it is the same clock,
 * as long as it is in Passive level.
 */
}
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (display_count == 0)
enter_display_off = true;
if (clk_mgr->smu_present) {
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
fclk_p_state_change_support = new_clocks->fclk_p_state_change_support;
if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
!dc->work_arounds.clock_update_disable_mask.fclk) {
clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
/* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
/* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
}
}
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) &&
!dc->work_arounds.clock_update_disable_mask.dcfclk) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) &&
!dc->work_arounds.clock_update_disable_mask.dcfclk_ds) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
/* We don't actually care about socclk, don't notify SMU of hard min */
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
p_state_change_support = new_clocks->p_state_change_support;
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) &&
!dc->work_arounds.clock_update_disable_mask.uclk) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support) {
if (dc->clk_mgr->dc_mode_softmax_enabled) {
/* On DCN32x we will never have the functional UCLK min above the softmax
* since we calculate mode support based on softmax being the max UCLK
* frequency.
*/
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
}
}
}
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, true);
else
dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, false);
/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
update_fclk = true;
}
if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk &&
!dc->work_arounds.clock_update_disable_mask.fclk) {
/* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz) &&
!dc->work_arounds.clock_update_disable_mask.uclk) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
update_uclk = true;
}
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
!dc->work_arounds.clock_update_disable_mask.uclk)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
}
dcn32_update_dppclk_dispclk_freq(clk_mgr, new_clocks);
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
if (clk_mgr->smu_present && !dpp_clock_lowered)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
update_dispclk = true;
}
if (!new_clocks->dtbclk_en) {
new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
}
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
if (!dc->debug.disable_dtb_ref_clk_switch &&
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
dcn32_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.
*/
dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
/*update dmcu for wait_loop count*/
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}
static uint32_t dcn32_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
struct fixed31_32 pll_req;
uint32_t pll_req_reg = 0;
/* get FbMult value */
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev))
pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);
else
pll_req_reg = REG_READ(CLK1_CLK_PLL_REQ);
/* set up a fixed-point number
* this works because the int part is on the right edge of the register
* and the frac part is on the left edge
*/
pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
return dc_fixpt_floor(pll_req);
}
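/* Illustrative arithmetic only (editor's sketch, FbMult value assumed): with
 * dfs_ref_freq_khz = 100000 as programmed in dcn32_clk_mgr_construct below, an
 * FbMult integer part of 43 and zero fractional bits gives
 *     VCO = 43 * 100000 kHz = 4,300,000 kHz (4.3 GHz),
 * which matches the fallback dentist_vco_freq_khz used when this register read
 * returns 0.
 */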
static void dcn32_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dprefclk_did = 0;
uint32_t dcfclk_did = 0;
uint32_t dtbclk_did = 0;
uint32_t dispclk_did = 0;
uint32_t dppclk_did = 0;
uint32_t target_div = 0;
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
/* DFS Slice 0 is used for DISPCLK */
dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL);
/* DFS Slice 1 is used for DPPCLK */
dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL);
/* DFS Slice 2 is used for DPREFCLK */
dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL);
/* DFS Slice 3 is used for DCFCLK */
dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL);
/* DFS Slice 4 is used for DTBCLK */
dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL);
} else {
/* DFS Slice 0 is used for DISPCLK */
dispclk_did = REG_READ(CLK1_CLK0_DFS_CNTL);
/* DFS Slice 1 is used for DPPCLK */
dppclk_did = REG_READ(CLK1_CLK1_DFS_CNTL);
/* DFS Slice 2 is used for DPREFCLK */
dprefclk_did = REG_READ(CLK1_CLK2_DFS_CNTL);
/* DFS Slice 3 is used for DCFCLK */
dcfclk_did = REG_READ(CLK1_CLK3_DFS_CNTL);
/* DFS Slice 4 is used for DTBCLK */
dtbclk_did = REG_READ(CLK1_CLK4_DFS_CNTL);
}
/* Convert DISPCLK DFS Slice DID to divider*/
target_div = dentist_get_divider_from_did(dispclk_did);
//Get dispclk in khz
regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
/* Convert DPPCLK DFS Slice DID to divider*/
target_div = dentist_get_divider_from_did(dppclk_did);
//Get dppclk in khz
regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
/* Convert DPREFCLK DFS Slice DID to divider*/
target_div = dentist_get_divider_from_did(dprefclk_did);
//Get dprefclk in khz
regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
/* Convert DCFCLK DFS Slice DID to divider*/
target_div = dentist_get_divider_from_did(dcfclk_did);
//Get dcfclk in khz
regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
/* Convert DTBCLK DFS Slice DID to divider*/
target_div = dentist_get_divider_from_did(dtbclk_did);
//Get dtbclk in khz
regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
}
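/* Worked example for the conversions above (editor's illustration; assumes the
 * DENTIST scale factor encodes dividers in 0.25 steps, i.e. a value of 4): a
 * DID that decodes to target_div = 22 is a 5.5 divide ratio, so with a
 * 4.3 GHz VCO
 *     dispclk = 4 * 4,300,000 / 22 ≈ 781,818 kHz.
 * The same formula is applied to every DFS slice read above.
 */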
static void dcn32_clock_read_ss_info(struct clk_mgr_internal *clk_mgr)
{
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
bp, AS_SIGNAL_TYPE_GPU_PLL);
if (ss_info_num) {
struct spread_spectrum_info info = { { 0 } };
enum bp_result result = bp->funcs->get_spread_spectrum_info(
bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
/* SSInfo.spreadSpectrumPercentage != 0 would be a sign
* that SS is enabled
*/
if (result == BP_RESULT_OK &&
info.spread_spectrum_percentage != 0) {
clk_mgr->ss_on_dprefclk = true;
clk_mgr->dprefclk_ss_divider = info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/* Currently for DP Reference clock we
* need only SS percentage for
* downspread
*/
clk_mgr->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
}
}
}
static void dcn32_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
unsigned int i;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
if (!clk_mgr->smu_present)
return;
if (!table)
return;
memset(table, 0, sizeof(*table));
/* collect valid ranges, place in pmfw table */
for (i = 0; i < WM_SET_COUNT; i++)
if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn32_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
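/* Note on the address handoff above (editor's illustration): the 64-bit GPU
 * address of the watermark table is sent to PMFW as two dwords, the high part
 * via addr >> 32 and the low part via addr & 0xFFFFFFFF.
 */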
/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
if (current_mode) {
if (clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->max_memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
}
}
/* Set max memclk to highest DPM value */
static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->max_memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
/* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
&num_entries_per_clk->num_memclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz;
/* memclk must have at least one level */
num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1;
dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
num_levels = num_entries_per_clk->num_memclk_levels;
} else {
num_levels = num_entries_per_clk->num_fclk_levels;
}
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
clk_mgr->dpm_present = false;
if (!clk_mgr->dpm_present)
dcn32_patch_dpm_table(clk_mgr_base->bw_params);
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
DC_FP_END();
}
static bool dcn32_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
else if (a->dramclk_khz != b->dramclk_khz)
return false;
else if (a->p_state_change_support != b->p_state_change_support)
return false;
else if (a->fclk_p_state_change_support != b->fclk_p_state_change_support)
return false;
return true;
}
static void dcn32_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn32_smu_set_pme_workaround(clk_mgr);
}
static bool dcn32_is_smu_present(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return clk_mgr->smu_present;
}
static void dcn32_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}
static void dcn32_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
if (!clk_mgr->smu_present)
return;
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}
static struct clk_mgr_funcs dcn32_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn32_update_clocks,
.dump_clk_registers = dcn32_dump_clk_registers,
.init_clocks = dcn32_init_clocks,
.notify_wm_ranges = dcn32_notify_wm_ranges,
.set_hard_min_memclk = dcn32_set_hard_min_memclk,
.set_hard_max_memclk = dcn32_set_hard_max_memclk,
.set_max_memclk = dcn32_set_max_memclk,
.set_min_memclk = dcn32_set_min_memclk,
.get_memclk_states_from_smu = dcn32_get_memclk_states_from_smu,
.are_clock_states_equal = dcn32_are_clock_states_equal,
.enable_pme_wa = dcn32_enable_pme_wa,
.is_smu_present = dcn32_is_smu_present,
.get_dispclk_from_dentist = dcn32_get_dispclk_from_dentist,
};
void dcn32_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct clk_log_info log_info = {0};
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn32_funcs;
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
clk_mgr->regs = &clk_mgr_regs_dcn321;
clk_mgr->clk_mgr_shift = &clk_mgr_shift_dcn321;
clk_mgr->clk_mgr_mask = &clk_mgr_mask_dcn321;
} else {
clk_mgr->regs = &clk_mgr_regs_dcn32;
clk_mgr->clk_mgr_shift = &clk_mgr_shift_dcn32;
clk_mgr->clk_mgr_mask = &clk_mgr_mask_dcn32;
}
clk_mgr->dccg = dccg;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->dfs_ref_freq_khz = 100000;
/* Changed from DCN3.2_clock_frequency doc to match
* dcn32_dump_clk_registers from 4 * dentist_vco_freq_khz /
* dprefclk DID divider
*/
clk_mgr->base.dprefclk_khz = 716666;
if (ctx->dc->debug.disable_dtb_ref_clk_switch) {
//initialize DTB ref clock value if DPM disabled
if (ctx->dce_version == DCN_VERSION_3_21)
clk_mgr->base.clks.ref_dtbclk_khz = 477800;
else
clk_mgr->base.clks.ref_dtbclk_khz = 268750;
}
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;
}
if (clk_mgr->base.boot_snapshot.dprefclk != 0) {
clk_mgr->base.dprefclk_khz = clk_mgr->base.boot_snapshot.dprefclk;
}
dcn32_clock_read_ss_info(clk_mgr);
clk_mgr->dfs_bypass_enabled = false;
clk_mgr->smu_present = false;
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
}
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
clk_mgr->wm_range_table);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn20_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h"
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
#define REG(reg) \
(clk_mgr->regs->reg)
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define CLK_BASE_INNER(seg) \
CLK_BASE__INST0_SEG ## seg
static const struct clk_mgr_registers clk_mgr_regs = {
CLK_REG_LIST_NV10()
};
static const struct clk_mgr_shift clk_mgr_shift = {
CLK_MASK_SH_LIST_NV10(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask = {
CLK_MASK_SH_LIST_NV10(_MASK)
};
uint32_t dentist_get_did_from_divider(int divider)
{
uint32_t divider_id;
/* we want to floor here to get higher clock than required rather than lower */
if (divider < DENTIST_DIVIDER_RANGE_2_START) {
if (divider < DENTIST_DIVIDER_RANGE_1_START)
divider_id = DENTIST_BASE_DID_1;
else
divider_id = DENTIST_BASE_DID_1
+ (divider - DENTIST_DIVIDER_RANGE_1_START)
/ DENTIST_DIVIDER_RANGE_1_STEP;
} else if (divider < DENTIST_DIVIDER_RANGE_3_START) {
divider_id = DENTIST_BASE_DID_2
+ (divider - DENTIST_DIVIDER_RANGE_2_START)
/ DENTIST_DIVIDER_RANGE_2_STEP;
} else if (divider < DENTIST_DIVIDER_RANGE_4_START) {
divider_id = DENTIST_BASE_DID_3
+ (divider - DENTIST_DIVIDER_RANGE_3_START)
/ DENTIST_DIVIDER_RANGE_3_STEP;
} else {
divider_id = DENTIST_BASE_DID_4
+ (divider - DENTIST_DIVIDER_RANGE_4_START)
/ DENTIST_DIVIDER_RANGE_4_STEP;
if (divider_id > DENTIST_MAX_DID)
divider_id = DENTIST_MAX_DID;
}
return divider_id;
}
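/* Illustrative mapping (editor's sketch, using the range constants
 * symbolically): a divider falling between DENTIST_DIVIDER_RANGE_2_START and
 * DENTIST_DIVIDER_RANGE_3_START encodes as
 *     DID = DENTIST_BASE_DID_2
 *         + (divider - DENTIST_DIVIDER_RANGE_2_START) / DENTIST_DIVIDER_RANGE_2_STEP.
 * The integer division floors, so the chosen DID never yields a lower clock
 * than requested, as the comment at the top of the function intends.
 */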
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz, prev_dppclk_khz;
/* Loop index will match dpp->inst if resource exists,
* and we want to avoid dependency on dpp object
*/
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct dc_state *context)
{
int dpp_divider = 0;
int disp_divider = 0;
uint32_t dppclk_wdivider = 0;
uint32_t dispclk_wdivider = 0;
uint32_t current_dispclk_wdivider;
uint32_t i;
if (clk_mgr->base.clks.dppclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0)
return;
dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
REG_GET(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, &current_dispclk_wdivider);
/* When changing divider to or from 127, some extra programming is required to prevent corruption */
if (current_dispclk_wdivider == 127 && dispclk_wdivider != 127) {
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
uint32_t fifo_level;
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
continue;
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
stream_enc);
N = fifo_level / 4;
dccg->funcs->set_fifo_errdet_ovr_en(
dccg,
true);
for (j = 0; j < N - 4; j++)
dccg->funcs->otg_drop_pixel(
dccg,
pipe_ctx->stream_res.tg->inst);
dccg->funcs->set_fifo_errdet_ovr_en(
dccg,
false);
}
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
uint32_t fifo_level;
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
continue;
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
stream_enc);
N = fifo_level / 4;
dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
for (j = 0; j < 12 - N; j++)
dccg->funcs->otg_add_pixel(dccg,
pipe_ctx->stream_res.tg->inst);
dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
}
}
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}
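/* Worked example of the FIFO recentering above (editor's illustration, FIFO
 * level assumed): leaving a WDIVIDER of 127 with a calibrated average FIFO
 * level of 32 gives N = 32 / 4 = 8, so N - 4 = 4 pixels are dropped per OTG;
 * entering 127 with the same level adds 12 - N = 4 pixels. Error-detect
 * override is held enabled around the adjustment in both paths.
 */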
void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
dc->debug.force_clock_mode & 0x1) {
//this is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but still need to set them for S3.
force_reset = true;
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
//force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in Passive level.
}
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
}
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
if (pp_smu && pp_smu->set_hard_min_dcfclk_by_freq)
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
if (pp_smu && pp_smu->set_min_deep_sleep_dcfclk)
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz)) {
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
if (pp_smu && pp_smu->set_hard_min_socclk_by_freq)
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.socclk_khz));
}
total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
if (pp_smu && pp_smu->set_hard_min_uclk_by_freq)
pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
update_dispclk = true;
}
if (update_dppclk || update_dispclk) {
new_clocks->disp_dpp_voltage_level_khz = new_clocks->dppclk_khz;
if (update_dispclk)
new_clocks->disp_dpp_voltage_level_khz = new_clocks->dispclk_khz > new_clocks->dppclk_khz ? new_clocks->dispclk_khz : new_clocks->dppclk_khz;
clk_mgr_base->clks.disp_dpp_voltage_level_khz = new_clocks->disp_dpp_voltage_level_khz;
if (pp_smu && pp_smu->set_voltage_by_freq)
pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.disp_dpp_voltage_level_khz));
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
// always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}
}
void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
}
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
}
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
}
if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = fclk_adj;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
/* Both fclk and ref_dppclk run on the same scemi clock.
* So take the higher value since the DPP DTO is typically programmed
* such that max dppclk is 1:1 with ref_dppclk.
*/
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
// Both fclk and ref_dppclk run on the same scemi clock.
clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
/* TODO: set dtbclk in correct place */
clk_mgr->clks.dtbclk_en = false;
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
void dcn2_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
}
static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs_nv *pp_smu = NULL;
if (clk_mgr->pp_smu) {
pp_smu = &clk_mgr->pp_smu->nv_funcs;
if (pp_smu->set_pme_wa_enable)
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
}
}
void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t dispclk_wdivider;
uint32_t dppclk_wdivider;
int disp_divider;
int dpp_divider;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
dpp_divider = dentist_get_divider_from_did(dppclk_wdivider);
if (disp_divider && dpp_divider) {
/* Calculate the current DFS clock, in kHz.*/
clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
}
}
void dcn2_get_clock(struct clk_mgr *clk_mgr,
struct dc_state *context,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg)
{
if (clock_type == DC_CLOCK_TYPE_DISPCLK) {
clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz;
clock_cfg->min_clock_khz = DCN_MINIMUM_DISPCLK_Khz;
clock_cfg->current_clock_khz = clk_mgr->clks.dispclk_khz;
clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dispclk_khz;
}
if (clock_type == DC_CLOCK_TYPE_DPPCLK) {
clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
clock_cfg->min_clock_khz = DCN_MINIMUM_DPPCLK_Khz;
clock_cfg->current_clock_khz = clk_mgr->clks.dppclk_khz;
clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dppclk_khz;
}
}
static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->disp_dpp_voltage_level_khz != b->disp_dpp_voltage_level_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->socclk_khz != b->socclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
else if (a->dramclk_khz != b->dramclk_khz)
return false;
else if (a->p_state_change_support != b->p_state_change_support)
return false;
return true;
}
/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int i, max_phyclk_req = 0;
struct pp_smu_funcs_nv *pp_smu = NULL;
if (!clk_mgr->pp_smu || !clk_mgr->pp_smu->nv_funcs.set_voltage_by_freq)
return;
pp_smu = &clk_mgr->pp_smu->nv_funcs;
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
for (i = 0; i < MAX_PIPES * 2; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
}
}
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
.enable_pme_wa = dcn2_enable_pme_wa,
.get_clock = dcn2_get_clock,
.are_clock_states_equal = dcn2_are_clock_states_equal,
.notify_link_rate_change = dcn2_notify_link_rate_change,
};
void dcn20_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
int dprefclk_did;
int target_div;
uint32_t pll_req_reg;
struct fixed31_32 pll_req;
clk_mgr->base.ctx = ctx;
clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &dcn2_funcs;
clk_mgr->regs = &clk_mgr_regs;
clk_mgr->clk_mgr_shift = &clk_mgr_shift;
clk_mgr->clk_mgr_mask = &clk_mgr_mask;
clk_mgr->dccg = dccg;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
/* DFS Slice 2 should be used for DPREFCLK */
dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL);
/* Convert DPREFCLK DFS Slice DID to actual divider */
target_div = dentist_get_divider_from_did(dprefclk_did);
/* get FbMult value */
pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ);
/* set up a fixed-point number
* this works because the int part is on the right edge of the register
* and the frac part is on the left edge
*/
pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, 100000);
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3850000;
/* Calculate the DPREFCLK in kHz.*/
clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz) / target_div;
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
//Also there is no plan for now that DFS BYPASS will be used on NV10/12/14.
clk_mgr->dfs_bypass_enabled = false;
dce_clock_read_ss_info(clk_mgr);
}
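/* Example of the DPREFCLK derivation above (editor's illustration; assumes the
 * DENTIST scale factor encodes dividers in 0.25 steps): with the default
 * dentist_vco_freq_khz of 3,850,000 and a DPREFCLK DID decoding to
 * target_div = 22 (a 5.5 divide ratio),
 *     dprefclk = 4 * 3,850,000 / 22 = 700,000 kHz,
 * matching the 700 MHz planning value noted earlier in this constructor.
 */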
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
#include "rv2_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
static struct clk_mgr_internal_funcs rv2_clk_internal_funcs = {
.set_dispclk = dce112_set_dispclk,
.set_dprefclk = dce112_set_dprefclk
};
void rv2_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
clk_mgr->funcs = &rv2_clk_internal_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr_clk.h"
#include "ip/Discovery/hwid.h"
#include "ip/Discovery/v1/ip_offset_1.h"
#include "ip/CLK/clk_10_0_default.h"
#include "ip/CLK/clk_10_0_offset.h"
#include "ip/CLK/clk_10_0_reg.h"
#include "ip/CLK/clk_10_0_sh_mask.h"
#include "dce100/dce_clk_mgr.h"
#define CLK_BASE_INNER(inst) \
CLK_BASE__INST ## inst ## _SEG0
#define CLK_REG(reg_name, block, inst)\
CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## _ ## inst ## _ ## reg_name
#define REG(reg_name) \
CLK_REG(reg_name, CLK0, 0)
/* Only used by testing framework*/
void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk
bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007;
if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4)
bypass->dcfclk_bypass = 0;
regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider
regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow
regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk
bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007;
if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4)
bypass->dispclk_pypass = 0;
regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk
bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007;
if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4)
bypass->dprefclk_bypass = 0;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include <linux/delay.h>
#include "rv1_clk_mgr_vbios_smu.h"
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
#define mmMP1_SMN_C2PMSG_91 0x29B
#define mmMP1_SMN_C2PMSG_83 0x293
#define mmMP1_SMN_C2PMSG_67 0x283
#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x00000000
#define REG(reg_name) \
(MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
#define VBIOSSMC_Result_Failed 0xFF
#define VBIOSSMC_Result_UnknownCmd 0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t rv1_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_91);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
static int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
unsigned int msg_id, unsigned int param)
{
uint32_t result;
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = rv1_smu_wait_for_response(clk_mgr, 10, 1000);
ASSERT(result == VBIOSSMC_Result_OK);
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
}
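/* Illustrative call (editor's sketch, frequency assumed): requesting a 600 MHz
 * display clock goes through
 *     rv1_vbios_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, 600);
 * the SMU reports the frequency it actually granted back in MP1_SMN_C2PMSG_83,
 * and the caller converts that MHz value to kHz (see rv1_vbios_smu_set_dispclk
 * below).
 */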
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
struct dc *dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_dispclk_set_mhz / 7);
}
return actual_dispclk_set_mhz * 1000;
}
int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
/* TODO: add code for programming DP DTO; currently this is done by the command table */
return actual_dprefclk_set_mhz * 1000;
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "reg_helper.h"
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"
static void rv1_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
}
static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
/* increase clock, looking for div is 0 for current, request div is 1*/
if (dispclk_increase) {
/* already divided by 2, no need to reach target clk with 2 steps*/
if (cur_dpp_div)
return new_clocks->dispclk_khz;
/* request disp clk is lower than maximum supported dpp clk,
* no need to reach target clk with two steps.
*/
if (new_clocks->dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* target dpp clk is not requested to be divided by 2, still within threshold */
if (!request_dpp_div)
return new_clocks->dispclk_khz;
} else {
/* decrease clock, looking for current dppclk divided by 2,
* request dppclk not divided by 2.
*/
/* current dpp clk not divided by 2, no need to ramp*/
if (!cur_dpp_div)
return new_clocks->dispclk_khz;
/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* request dpp clk need to be divided by 2 */
if (request_dpp_div)
return new_clocks->dispclk_khz;
}
return disp_clk_threshold;
}
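/* Example of the two-step ramp decision above (editor's illustration, clock
 * values assumed): raising dispclk from 400 MHz to 800 MHz with
 * max_supported_dppclk_khz = 600000, no current DPP divide and a requested
 * DPP divide returns the 600 MHz threshold, so ramp_up_dispclk_with_dpp()
 * below first steps dispclk to 600 MHz, reprograms the DPP dividers, and only
 * then jumps to the 800 MHz target.
 */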
static void ramp_up_dispclk_with_dpp(
struct clk_mgr_internal *clk_mgr,
struct dc *dc,
struct dc_clocks *new_clocks,
bool safe_to_lower)
{
int i;
int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
/* this function is to change dispclk, dppclk and dprefclk according to
* bandwidth requirement. Its call stack is rv1_update_clocks -->
* update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
* --> prepare_bandwidth / optimize_bandwidth. before change dcn hw,
* prepare_bandwidth will be called first to allow enough clock,
* watermark for change, after end of dcn hw change, optimize_bandwidth
* is executed to lower clock to save power for new dcn hw settings.
*
* below is sequence of commit_planes_for_stream:
*
* step 1: prepare_bandwidth - raise clock to have enough bandwidth
* step 2: lock_doublebuffer_enable
* step 3: pipe_control_lock(true) - make dchubp register change will
* not take effect right way
* step 4: apply_ctx_for_surface - program dchubp
* step 5: pipe_control_lock(false) - dchubp register change take effect
* step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
* for full_date, optimize clock to save power
*
* at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be
* changed for new dchubp configuration. but real dcn hub dchubps are
* still running with old configuration until end of step 5. this means
* clock settings at step 1 should not be less than those before step 1.
* this is checked by two conditions: 1. if (should_set_clock(safe_to_lower
* , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
* new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
* 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
*
* the second condition is based on new dchubp configuration. dppclk
* for new dchubp may be different from dppclk before step 1.
* for example, before step 1, dchubps are as below:
* pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
* pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
* so for pipe 0 we need dppclk = dispclk
*
* new dchubp pipe split configuration:
* pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
* pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
* dppclk only needs dppclk = dispclk /2.
*
* dispclk, dppclk are not lock by otg master lock. they take effect
* after step 1. during this transition, dispclk are the same, but
* dppclk is changed to half of previous clock for old dchubp
* configuration between step 1 and step 6. This may cause p-state
* warning intermittently.
*
* for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
* need make sure dppclk are not changed to less between step 1 and 6.
* for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz,
* new display clock is raised, but we do not know ratio of
* new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz,
* new_clocks->dispclk_khz /2 does not guarantee equal or higher than
* old dppclk. we could ignore power saving different between
* dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6.
* as long as safe_to_lower = false, set dppclk = dispclk to simplify
* condition check.
* todo: review this change for other asic.
**/
if (!safe_to_lower)
request_dpp_div = false;
/* set disp clk to dpp clk threshold */
clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
clk_mgr->funcs->set_dprefclk(clk_mgr);
/* update request dpp clk division option */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (!pipe_ctx->plane_state)
continue;
pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
pipe_ctx->plane_res.dpp,
request_dpp_div,
true);
}
/* If target clk not same as dppclk threshold, set to target clock */
if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
clk_mgr->funcs->set_dprefclk(clk_mgr);
}
clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
bool enter_display_off = false;
ASSERT(clk_mgr->pp_smu);
if (dc->work_arounds.skip_clock_update)
return;
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
/*
* Notify SMU active displays
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
if (pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
}
if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
|| new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
|| new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
|| new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
send_request_to_increase = true;
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
send_request_to_lower = true;
}
// F Clock
if (debug->force_fclk_khz != 0)
new_clocks->fclk_khz = debug->force_fclk_khz;
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
send_request_to_lower = true;
}
/* make sure dcf clk is before dpp clk to
* make sure we have enough voltage to run dpp clk
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
}
}
/* dcn1 dppclk is tied to dispclk */
/* program dispclk even when unchanged (==) as a w/a for sleep resume clock ramping issues */
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
}
}
}
static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs_rv *pp_smu = NULL;
if (clk_mgr->pp_smu) {
pp_smu = &clk_mgr->pp_smu->rv_funcs;
if (pp_smu->set_pme_wa_enable)
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
}
}
static struct clk_mgr_funcs rv1_clk_funcs = {
.init_clocks = rv1_init_clocks,
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rv1_update_clocks,
.enable_pme_wa = rv1_enable_pme_wa,
};
static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
.set_dispclk = rv1_vbios_smu_set_dispclk,
.set_dprefclk = dce112_set_dprefclk
};
void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
clk_mgr->base.ctx = ctx;
clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &rv1_clk_funcs;
clk_mgr->funcs = &rv1_clk_internal_funcs;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr->dfs_bypass_enabled = true;
dce_clock_read_ss_info(clk_mgr);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c |
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include <linux/delay.h>
#include "renoir_ip_offset.h"
#include "mp/mp_12_0_0_offset.h"
#include "mp/mp_12_0_0_sh_mask.h"
#include "rn_clk_mgr_vbios_smu.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
#define VBIOSSMC_MSG_TestMessage 0x1
#define VBIOSSMC_MSG_GetSmuVersion 0x2
#define VBIOSSMC_MSG_PowerUpGfx 0x3
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
#define VBIOSSMC_MSG_PowerDownGfx 0x6
#define VBIOSSMC_MSG_SetDppclkFreq 0x7
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x8
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x9
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0xA
#define VBIOSSMC_MSG_GetFclkFrequency 0xB
#define VBIOSSMC_MSG_SetDisplayCount 0xC
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xD
#define VBIOSSMC_MSG_UpdatePmeRestore 0xE
#define VBIOSSMC_MSG_IsPeriodicRetrainingDisabled 0xF
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
#define VBIOSSMC_Result_Failed 0xFF
#define VBIOSSMC_Result_UnknownCmd 0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_91);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
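/*
* Timing note (illustrative, not from the original source): with the arguments
* used by the caller below (delay_us = 10, max_retries = 200000) this poll
* allows roughly 10 us * 200000 = 2 seconds for the SMU to clear the BUSY
* status before the last value read is returned.
*/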
static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
unsigned int msg_id,
unsigned int param)
{
uint32_t result;
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
if (result == VBIOSSMC_Status_BUSY) {
return -1;
}
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message; the unit is MHz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
}
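/*
* Usage sketch (illustration only, mirroring the callers below): clock
* requests are converted from kHz to MHz on the way in, and the MHz value the
* SMU echoes back in MP1_SMN_C2PMSG_83 is scaled back up to kHz:
*
*   int set_mhz = rn_vbios_smu_send_msg_with_param(clk_mgr,
*                         VBIOSSMC_MSG_SetDispclkFreq,
*                         khz_to_mhz_ceil(requested_dispclk_khz));
*   return set_mhz * 1000;
*/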
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
return rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetSmuVersion,
0);
}
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
struct dc *dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_dispclk_set_mhz / 7);
}
// PMFW always sets the clock to a value greater than or equal to the requested clock
ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
/* TODO: add code for programming the DP DTO; currently this is done by the command table */
return actual_dprefclk_set_mhz * 1000;
}
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
if (clk_mgr->smu_ver < 0x370c00)
return actual_dcfclk_set_mhz;
actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
int actual_min_ds_dcfclk_mhz = -1;
if (clk_mgr->smu_ver < 0x370c00)
return actual_min_ds_dcfclk_mhz;
actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
return actual_min_ds_dcfclk_mhz * 1000;
}
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetPhyclkVoltageByFreq,
khz_to_mhz_ceil(requested_phyclk_khz));
}
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
int disp_count;
if (state == DCN_PWR_STATE_LOW_POWER)
disp_count = 0;
else
disp_count = 1;
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayCount,
disp_count);
}
void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
enable);
}
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
}
int rn_vbios_smu_is_periodic_retraining_disabled(struct clk_mgr_internal *clk_mgr)
{
return rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_IsPeriodicRetrainingDisabled,
1); // if PMFW doesn't support this message, assume retraining is disabled
// so we only use most optimal watermark if we know retraining is enabled.
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
#include "rn_clk_mgr_vbios_smu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "clk/clk_10_0_2_offset.h"
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"
/* Constants */
#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
/* Macros */
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
/* WA for hang on HDMI after turning the display off and back on */
if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
}
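/*
* Illustrative note (an inference, not from the original source): an HDMI or
* DVI stream can still be present in the context while its DIG is momentarily
* disabled (display turned off and back on). Without the clamp above the count
* would read 0, the SMU could be put into the low-power display state, and the
* re-enable could then hit the hang this WA guards against, so at least one
* display is reported whenever a TMDS signal is present.
*/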
static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
int display_count;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = rn_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
}
static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context, int ref_dpp_clk, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = ref_dpp_clk;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz, prev_dppclk_khz;
/* Loop index may not match dpp->inst if some pipes are disabled,
* so select correct inst from res_pool
*/
dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
if (dc->work_arounds.skip_clock_update)
return;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also, if safe_to_lower is false, we just go to the higher state
*/
if (safe_to_lower && !dc->debug.disable_48mhz_pwrdwn) {
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = rn_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
}
} else {
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
}
}
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
rn_vbios_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
// Workaround: limit DPPCLK to a minimum of 100 MHz to avoid underflow when a lower-resolution eDP panel configuration switches to eDP plus a 4K monitor.
// Do not adjust DPPCLK if it is 0, to avoid unexpected results.
if (new_clocks->dppclk_khz < 100000 && new_clocks->dppclk_khz > 0)
new_clocks->dppclk_khz = 100000;
/*
* Temporarily ignore the 0 cases for disp and dpp clks.
* We may have a new feature that requires 0 clks in the future.
*/
if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
clk_mgr_base->clks.actual_dispclk_khz = rn_vbios_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
update_dispclk = true;
}
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk with requested dppclk
rn_update_clocks_update_dpp_dto(
clk_mgr,
context,
clk_mgr_base->clks.dppclk_khz,
safe_to_lower);
clk_mgr_base->clks.actual_dppclk_khz =
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
//update dpp dto with actual dpp clk.
rn_update_clocks_update_dpp_dto(
clk_mgr,
context,
clk_mgr_base->clks.actual_dppclk_khz,
safe_to_lower);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
clk_mgr_base->clks.actual_dppclk_khz =
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
rn_update_clocks_update_dpp_dto(
clk_mgr,
context,
clk_mgr_base->clks.actual_dppclk_khz,
safe_to_lower);
}
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}
}
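/*
* Sequencing note (illustrative summary of the code above, not from the
* original source): when DPPCLK is being lowered, the per-pipe DTOs are
* reprogrammed twice: first against the requested clock while the global
* DPPCLK is still high, then again with the actual clock returned by the SMU
* once the global clock has been dropped. When DPPCLK is being raised, the
* global clock is raised first and the DTOs follow. Presumably this keeps the
* effective per-pipe clock from dipping below its requirement mid-transition.
*/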
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
/* get FbMult value */
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
/*
* Register value of fbmult is in 8.16 format, we are converting to 31.32
* to leverage the fixed-point operations available in the driver
*/
REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
pll_req = dc_fixpt_from_int(fbmult_int_val);
/*
* since the fractional part is only 16 bits in the register definition but 32 bits
* in our fixed-point definition, we need to shift left by 16 to obtain the correct value
*/
pll_req.value |= fbmult_frac_val << 16;
/* multiply by REFCLK period */
pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
/* integer part is now VCO frequency in kHz */
return dc_fixpt_floor(pll_req);
}
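/*
* Worked example (illustration only, values assumed): with FbMult_int = 75 and
* FbMult_frac = 0x8000 (i.e. a multiplier of 75.5) and the 48 MHz DFS
* reference programmed in rn_clk_mgr_construct (dfs_ref_freq_khz = 48000),
* the VCO frequency computed above is 75.5 * 48000 = 3624000 kHz.
*/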
static void rn_dump_clk_registers_internal(struct rn_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL); //dcf deep sleep divider
internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
}
/* This function collects raw clk register values */
static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
struct rn_clk_internal internal = {0};
char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
unsigned int chars_printed = 0;
unsigned int remaining_buffer = log_info->bufSize;
rn_dump_clk_registers_internal(&internal, clk_mgr_base);
regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (log_info->enabled) {
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
regs_and_bypass->dcfclk,
regs_and_bypass->dcf_deep_sleep_divider,
regs_and_bypass->dcf_deep_sleep_allow,
bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
regs_and_bypass->dprefclk,
bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
regs_and_bypass->dispclk,
bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
//split
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
// REGISTER VALUES
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
internal.CLK1_CLK3_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
internal.CLK1_CLK3_DS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
internal.CLK1_CLK3_ALLOW_DS);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
internal.CLK1_CLK2_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
internal.CLK1_CLK0_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
internal.CLK1_CLK1_CURRENT_CNT);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
internal.CLK1_CLK3_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
internal.CLK1_CLK2_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
internal.CLK1_CLK0_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n",
internal.CLK1_CLK1_BYPASS_CNTL);
remaining_buffer -= chars_printed;
*log_info->sum_chars_printed += chars_printed;
log_info->pBuf += chars_printed;
}
}
static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
static void rn_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
num_valid_sets = 0;
for (i = 0; i < WM_SET_COUNT; i++) {
/* skip empty entries; the SMU array has no holes */
if (!bw_params->wm_table.entries[i].valid)
continue;
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on fclk, so leave it as unconstrained */
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* dcfclk will be used to select the WM */
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* Modify previous watermark range to cover up to max */
ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
}
num_valid_sets++;
}
ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
ranges->num_reader_wm_sets = num_valid_sets;
/* modify the min and max to make sure we cover the whole range */
ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* This is for writeback only; it does not matter currently as there is no writeback support */
ranges->num_writer_wm_sets = 1;
ranges->writer_wm_sets[0].wm_inst = WM_A;
ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
}
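/*
* Illustration (assumed 4-level clock table with DCFCLK 400/483/602/738 MHz
* and all four WM entries valid and of type WM_TYPE_PSTATE_CHG): the code
* above produces reader drain-clock ranges of roughly
*   set 0: [unconstrained min .. 400]
*   set 1: [401 .. 483]
*   set 2: [484 .. 602]
*   set 3: [603 .. unconstrained max]   (max forced by the post-loop fixup)
* with the fill clock left unconstrained for every set.
*/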
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) {
build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
}
}
static bool rn_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
return true;
}
/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int i, max_phyclk_req = 0;
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
for (i = 0; i < MAX_PIPES * 2; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
rn_vbios_smu_set_phyclk(clk_mgr, clk_mgr_base->clks.phyclk_khz);
}
}
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
.init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
.are_clock_states_equal = rn_are_clock_states_equal,
.set_low_power_state = rn_set_low_power_state,
.notify_wm_ranges = rn_notify_wm_ranges,
.notify_link_rate_change = rn_notify_link_rate_change,
};
static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
.entries = {
{
.voltage = 0,
.dcfclk_mhz = 400,
.fclk_mhz = 400,
.memclk_mhz = 800,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 483,
.fclk_mhz = 800,
.memclk_mhz = 1600,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 602,
.fclk_mhz = 1067,
.memclk_mhz = 1067,
.socclk_mhz = 0,
},
{
.voltage = 0,
.dcfclk_mhz = 738,
.fclk_mhz = 1333,
.memclk_mhz = 1600,
.socclk_mhz = 0,
},
},
.num_entries = 4,
},
};
static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
if (clock_table->SocClocks[i].Vol == voltage)
return clock_table->SocClocks[i].Freq;
}
ASSERT(0);
return 0;
}
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
if (clock_table->DcfClocks[i].Vol == voltage)
return clock_table->DcfClocks[i].Freq;
}
ASSERT(0);
return 0;
}
static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
{
int i, j = 0;
j = -1;
ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
/* Find the lowest DPM; FCLK is filled in reverse order */
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
j = i;
break;
}
}
if (j == -1) {
/* clock table is all 0s, just use our own hardcode */
ASSERT(0);
return;
}
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
bw_params->clk_table.entries[i].voltage);
}
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
bw_params->wm_table.entries[i].valid = true;
}
if (bw_params->vram_type == LpDdr4MemType) {
/*
* WM set D will be re-purposed for memory retraining
*/
DC_FP_START();
dcn21_clk_mgr_set_bw_params_wm_table(bw_params);
DC_FP_END();
}
}
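/*
* Illustration (an assumption based on the "reverse order" comment above): if
* the PMFW DPM table lists FCLK levels from highest to lowest, e.g.
* FClocks[].Freq = { 1333, 1067, 800, 400, 0, ... } with matching voltages,
* the search finds j = 3 as the lowest valid level and the loop builds a
* 4-entry clk_table in ascending order (400, 800, 1067, 1333 MHz), with
* DCFCLK and SOCCLK looked up per voltage.
*/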
void rn_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
int is_green_sardine = 0;
struct clk_log_info log_info = {0};
#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
clk_mgr->pp_smu = pp_smu;
clk_mgr->dccg = dccg;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->dfs_ref_freq_khz = 48000;
clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
/* SMU version 55.51.0 and up no longer have the issue
* that required limiting the minimum dispclk */
if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
debug->min_disp_clk_khz = 0;
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3600000;
if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
if (is_green_sardine)
rn_bw_params.wm_table = lpddr4_wm_table_gs;
else
rn_bw_params.wm_table = lpddr4_wm_table_rn;
}
} else {
if (is_green_sardine)
rn_bw_params.wm_table = ddr4_wm_table_gs;
else {
if (ctx->dc->config.is_single_rank_dimm)
rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
else
rn_bw_params.wm_table = ddr4_wm_table_rn;
}
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
clk_mgr->base.dprefclk_khz = 600000;
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = &rn_bw_params;
if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
if (status == PP_SMU_RESULT_OK &&
ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
/* treat memory config as single channel if memory is asymmetric */
if (ctx->dc->config.is_asymmetric_memory)
clk_mgr->base.bw_params->num_channels = 1;
}
}
/* enable power features when display count goes to 0 */
if (clk_mgr->smu_ver >= 0x00371500)
rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn315_smu.h"
#include "mp/mp_13_0_5_offset.h"
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
#define regBIF_BX_PF2_RSMU_DATA 0x0001
#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#define REG_NBIO(reg_name) \
(NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
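/*
* Worked expansion (illustration, derived from the tables above):
*   REG_NBIO(RSMU_INDEX) = NBIO_BASE.instance[0].segment[1] + 0x0000 = 0x00000014
*   REG_NBIO(RSMU_DATA)  = NBIO_BASE.instance[0].segment[1] + 0x0001 = 0x00000015
* i.e. the RSMU index/data pair used below to reach mmMP1_C2PMSG_3 indirectly.
*/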
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
#define mmMP1_C2PMSG_3 0x3B1050C
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
#define VBIOSSMC_MSG_SetDispclkFreq 0x04 ///< Set display clock frequency in MHZ
#define VBIOSSMC_MSG_Spare1 0x05 ///< Spare1
#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
#define VBIOSSMC_MSG_GetDtbclkFreq 0x09 ///< Get display dtb clock frequency in MHZ in case VMIN does not support phy frequency
#define VBIOSSMC_MSG_SetDtbClk 0x0A ///< Set dtb clock frequency, return frequency in MHZ
#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW to turn off TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0x0E ///< Set DRAM address high 32 bits for WM table transfer
#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0x0F ///< Set DRAM address low 32 bits for WM table transfer
#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10 ///< Transfer table from PMFW SRAM to system DRAM
#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11 ///< Transfer table from system DRAM to PMFW
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12 ///< Set Idle state optimization for display off
#define VBIOSSMC_MSG_GetDprefclkFreq 0x13 ///< Get DPREF clock frequency. Return in MHZ
#define VBIOSSMC_Message_Count 0x14 ///< Total number of VBIOS and DAL messages
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x01 ///< Message Response OK
#define VBIOSSMC_Result_Failed 0xFF ///< Message Response Failed
#define VBIOSSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected because PMFW is busy. Sender should retry sending this message
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
* won't work with REG_WAIT.
*/
static uint32_t dcn315_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
uint32_t res_val = VBIOSSMC_Status_BUSY;
do {
res_val = REG_READ(MP1_SMN_C2PMSG_38);
if (res_val != VBIOSSMC_Status_BUSY)
break;
if (delay_us >= 1000)
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
} while (max_retries--);
return res_val;
}
static int dcn315_smu_send_msg_with_param(
struct clk_mgr_internal *clk_mgr,
unsigned int msg_id, unsigned int param)
{
uint32_t result;
uint32_t i = 0;
uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
if (result == VBIOSSMC_Status_BUSY) {
return -1;
}
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_38, VBIOSSMC_Status_BUSY);
/* Set the parameter register for the SMU message; the unit is MHz */
REG_WRITE(MP1_SMN_C2PMSG_37, param);
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
generic_write_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3, msg_id);
read_back_data = generic_read_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3);
if (read_back_data == msg_id)
break;
udelay(2);
smu_print("SMU msg id write fail %x times. \n", i + 1);
}
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Status_BUSY) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
}
return REG_READ(MP1_SMN_C2PMSG_37);
}
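/*
* Note (descriptive, not from the original source): unlike the DCN21 variant
* earlier in this file set, the message ID here is written through the NBIO
* RSMU index/data pair and read back to confirm the write landed, retrying up
* to SMU_REGISTER_WRITE_RETRY_COUNT (5) times with a 2 us pause between
* attempts before polling the response register again.
*/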
int dcn315_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
return dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetPmfwVersion,
0);
}
int dcn315_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dispclk_khz;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_dcfclk_khz;
actual_dcfclk_set_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}
int dcn315_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
int actual_min_ds_dcfclk_mhz = -1;
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return -1;
if (!clk_mgr->smu_present)
return requested_min_ds_dcfclk_khz;
actual_min_ds_dcfclk_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
return actual_min_ds_dcfclk_mhz * 1000;
}
int dcn315_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
if (!clk_mgr->smu_present)
return requested_dpp_khz;
actual_dppclk_set_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
void dcn315_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
return;
if (!clk_mgr->smu_present)
return;
//TODO: Work with smu team to define optimization options.
dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
}
void dcn315_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
union display_idle_optimization_u idle_info = { 0 };
if (!clk_mgr->smu_present)
return;
if (enable) {
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
}
dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
}
void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
}
void dcn315_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}
void dcn315_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}
void dcn315_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}
void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
{
int dprefclk_get_mhz = -1;
if (clk_mgr->smu_present) {
dprefclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetDprefclkFreq,
0);
}
return (dprefclk_get_mhz * 1000);
}
int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetDtbclkFreq,
0);
}
return (fclk_get_mhz * 1000);
}
void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
if (!clk_mgr->smu_present)
return;
dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
}
| linux-master | drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c |